hs-test: fix CPU alloc when running in parallel
Type: test
Change-Id: I6062eddffb938880d9ec004c8418a9a731891989
Signed-off-by: Adrian Villin <avillin@cisco.com>
Committed by: Florin Coras
Parent: f5df854389
Commit: 0df582e8ec
@@ -239,6 +239,7 @@ func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*VppInstance, error) {
     vpp := new(VppInstance)
     vpp.container = c
     vpp.cpus = cpus
+    c.suite.vppContainerCount += 1
     vpp.additionalConfig = append(vpp.additionalConfig, additionalConfigs...)
     c.vppInstance = vpp
     return vpp, nil
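
Each call to newVppInstance now advances a suite-wide counter, and AllocateCpus (in the suite changes further down) reads it before the bump, so the first VPP container of a test is container 0 and the second is container 1. A minimal sketch of that ordering, using trimmed-down stand-in types rather than the real suite:

    package main

    import "fmt"

    // Stand-ins for HstSuite/Container; only the counter logic from this
    // commit is reproduced, everything else is illustrative.
    type suite struct{ vppContainerCount int }

    // allocateCpus reads the counter before newVppInstance increments it,
    // mirroring s.cpuAllocator.Allocate(s.vppContainerCount, s.cpuPerVpp).
    func (s *suite) allocateCpus() int { return s.vppContainerCount }

    // newVppInstance bumps the counter, as c.suite.vppContainerCount += 1
    // does in the hunk above.
    func (s *suite) newVppInstance() { s.vppContainerCount += 1 }

    // setupTest zeroes the counter per test, as SetupTest does below.
    func (s *suite) setupTest() { s.vppContainerCount = 0 }

    func main() {
        s := &suite{}
        s.setupTest()
        fmt.Println(s.allocateCpus()) // 0: first container, lower half of the window
        s.newVppInstance()
        fmt.Println(s.allocateCpus()) // 1: second container, upper half
        s.newVppInstance()
    }
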
@@ -4,6 +4,7 @@ import (
     "bufio"
     "errors"
     "fmt"
+    . "github.com/onsi/ginkgo/v2"
     "os"
     "os/exec"
     "strings"
@@ -16,26 +17,31 @@ type CpuContext struct {
     cpus []int
 }
 
-func (c *CpuContext) Release() {
-    c.cpuAllocator.cpus = append(c.cpuAllocator.cpus, c.cpus...)
-    c.cpus = c.cpus[:0] // empty the list
-}
-
 type CpuAllocatorT struct {
     cpus []int
 }
 
 var cpuAllocator *CpuAllocatorT = nil
 
-func (c *CpuAllocatorT) Allocate(nCpus int) (*CpuContext, error) {
+func (c *CpuAllocatorT) Allocate(vppContainerCount int, nCpus int) (*CpuContext, error) {
     var cpuCtx CpuContext
 
-    if len(c.cpus) < nCpus {
-        return nil, fmt.Errorf("could not allocate %d CPUs; available: %d", nCpus, len(c.cpus))
+    maxCpu := GinkgoParallelProcess() * 2 * nCpus
+    minCpu := (GinkgoParallelProcess() - 1) * 2 * nCpus
+    if len(c.cpus) < maxCpu {
+        vppContainerCount += 1
+        err := fmt.Errorf("could not allocate %d CPUs; available: %d; attempted to allocate cores %d-%d",
+            nCpus*vppContainerCount, len(c.cpus), minCpu, minCpu+nCpus*vppContainerCount)
+        return nil, err
     }
-    cpuCtx.cpus = c.cpus[0:nCpus]
+    if vppContainerCount == 0 {
+        cpuCtx.cpus = c.cpus[minCpu : maxCpu-nCpus]
+    } else if vppContainerCount == 1 {
+        cpuCtx.cpus = c.cpus[minCpu+nCpus : maxCpu]
+    } else {
+        return nil, fmt.Errorf("too many VPP containers; CPU allocation for >2 VPP containers is not implemented yet")
+    }
+
     cpuCtx.cpuAllocator = c
-    c.cpus = c.cpus[nCpus:]
     return &cpuCtx, nil
 }
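
The allocator now reserves a fixed window of 2*nCpus entries in the CPU list for each Ginkgo parallel process (room for two VPP containers) and slices that window by container index, instead of consuming cores from a shared pool. A standalone sketch of the arithmetic, with parallelProcess standing in for the 1-based GinkgoParallelProcess():

    package main

    import "fmt"

    // allocate mirrors the slicing in CpuAllocatorT.Allocate: parallel
    // process p owns cpus[(p-1)*2*nCpus : p*2*nCpus], and the first/second
    // VPP container of a test takes the lower/upper half of that window.
    func allocate(cpus []int, parallelProcess, vppContainerCount, nCpus int) ([]int, error) {
        maxCpu := parallelProcess * 2 * nCpus
        minCpu := (parallelProcess - 1) * 2 * nCpus
        if len(cpus) < maxCpu {
            return nil, fmt.Errorf("could not allocate %d CPUs; available: %d", nCpus, len(cpus))
        }
        switch vppContainerCount {
        case 0:
            return cpus[minCpu : maxCpu-nCpus], nil
        case 1:
            return cpus[minCpu+nCpus : maxCpu], nil
        default:
            return nil, fmt.Errorf("CPU allocation for >2 VPP containers is not implemented")
        }
    }

    func main() {
        cpus := []int{0, 1, 2, 3, 4, 5, 6, 7} // e.g. 8 cores, -cpus=2, two parallel processes
        a, _ := allocate(cpus, 1, 0, 2)       // process 1, first container:  [0 1]
        b, _ := allocate(cpus, 1, 1, 2)       // process 1, second container: [2 3]
        c, _ := allocate(cpus, 2, 0, 2)       // process 2, first container:  [4 5]
        fmt.Println(a, b, c)
    }

Because each process owns a fixed, disjoint window, nothing has to be handed back at teardown; that is why CpuContext.Release() and the c.cpus = c.cpus[nCpus:] pool bookkeeping are deleted above, and why the Release loop disappears from TearDownTest below.
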
@@ -21,7 +21,9 @@ func EchoBuiltinTest(s *VethsSuite) {
     s.assertNotContains(o, "failed:")
 }
 
+// unstable with multiple workers
 func TcpWithLossTest(s *VethsSuite) {
+    s.SkipIfMultiWorker()
     serverVpp := s.getContainerByName("server-vpp").vppInstance
 
     serverVeth := s.getInterfaceByName(serverInterfaceName)
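
Tests that are unstable with multiple VPP workers now bail out via s.SkipIfMultiWorker(). The helper's body is not part of this diff; presumably it skips the spec when the -cpus flag (visible in the suite hunk below) asks for more than one core per VPP, along the lines of this assumed sketch:

    package main

    import (
        "flag"
        "fmt"
    )

    // Mirrors the -cpus flag from the suite; the gating condition is an
    // assumption, not code from this commit.
    var nConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")

    func skipIfMultiWorker() bool {
        // With -cpus > 1 VPP gets worker threads, and tests marked
        // "unstable with multiple workers" should not run.
        return *nConfiguredCpus > 1
    }

    func main() {
        flag.Parse()
        if skipIfMultiWorker() {
            fmt.Println("skipping: unstable with multiple workers")
            return
        }
        fmt.Println("running test")
    }
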
@@ -30,18 +30,19 @@ var nConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
 var vppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
 
 type HstSuite struct {
     containers map[string]*Container
+    vppContainerCount int
     volumes []string
     netConfigs []NetConfig
     netInterfaces map[string]*NetInterface
     ip4AddrAllocator *Ip4AddressAllocator
     testIds map[string]string
     cpuAllocator *CpuAllocatorT
     cpuContexts []*CpuContext
     cpuPerVpp int
     pid string
     logger *log.Logger
     logFile *os.File
 }
 
 func (s *HstSuite) SetupSuite() {
@@ -61,7 +62,7 @@ func (s *HstSuite) SetupSuite() {
 }
 
 func (s *HstSuite) AllocateCpus() []int {
-    cpuCtx, err := s.cpuAllocator.Allocate(s.cpuPerVpp)
+    cpuCtx, err := s.cpuAllocator.Allocate(s.vppContainerCount, s.cpuPerVpp)
     s.assertNil(err)
     s.AddCpuContext(cpuCtx)
     return cpuCtx.cpus
@@ -82,9 +83,6 @@ func (s *HstSuite) TearDownTest() {
     if *isPersistent {
         return
     }
-    for _, c := range s.cpuContexts {
-        c.Release()
-    }
     s.resetContainers()
     s.removeVolumes()
     s.ip4AddrAllocator.deleteIpAddresses()
@@ -98,6 +96,7 @@ func (s *HstSuite) skipIfUnconfiguring() {
 
 func (s *HstSuite) SetupTest() {
     s.log("Test Setup")
+    s.vppContainerCount = 0
     s.skipIfUnconfiguring()
     s.setupVolumes()
     s.setupContainers()
@@ -157,7 +156,7 @@ func (s *HstSuite) hstFail() {
     for _, container := range s.containers {
         out, err := container.log(20)
         if err != nil {
-            fmt.Printf("An error occured while obtaining '%s' container logs: %s\n", container.name, fmt.Sprint(err))
+            s.log("An error occured while obtaining '" + container.name + "' container logs: " + fmt.Sprint(err))
             continue
         }
         s.log("\nvvvvvvvvvvvvvvv " +
@@ -272,7 +272,9 @@ func runNginxPerf(s *NoTopoSuite, mode, ab_or_wrk string) error {
     return nil
 }
 
+// unstable with multiple workers
 func NginxPerfCpsTest(s *NoTopoSuite) {
+    s.SkipIfMultiWorker()
     s.assertNil(runNginxPerf(s, "cps", "ab"))
 }
 
@@ -8,7 +8,9 @@ func init() {
     registerNginxTests(MirroringTest)
 }
 
+// broken when CPUS > 1
 func MirroringTest(s *NginxSuite) {
+    s.SkipIfMultiWorker()
     proxyAddress := s.getInterfaceByName(mirroringClientInterfaceName).peer.ip4AddressString()
 
     path := "/64B.json"
@@ -456,6 +456,7 @@ func (vpp *VppInstance) generateCpuConfig() string {
     }
     c.newStanza("cpu").
         append(fmt.Sprintf("main-core %d", vpp.cpus[0]))
+    vpp.getSuite().log(fmt.Sprintf("main-core %d", vpp.cpus[0]))
     workers := vpp.cpus[1:]
 
     if len(workers) > 0 {
@@ -466,6 +467,7 @@ func (vpp *VppInstance) generateCpuConfig() string {
             s = s + fmt.Sprintf("%d", workers[i])
         }
         c.append(fmt.Sprintf("corelist-workers %s", s))
+        vpp.getSuite().log("corelist-workers " + s)
     }
     return c.close().toString()
 }
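
The two added log lines make the generated cpu stanza visible in the test output. A rough stand-in for generateCpuConfig, handy for checking what a given core list turns into; the exact whitespace and the comma separator are assumptions, since the Stanza helper and the worker-list loop are only partially shown in this diff:

    package main

    import (
        "fmt"
        "strings"
    )

    // main-core comes from cpus[0] and the worker list from the rest,
    // matching the logic in generateCpuConfig.
    func cpuConfig(cpus []int) string {
        workers := make([]string, 0, len(cpus)-1)
        for _, w := range cpus[1:] {
            workers = append(workers, fmt.Sprintf("%d", w))
        }
        cfg := fmt.Sprintf("cpu {\n  main-core %d\n", cpus[0])
        if len(workers) > 0 {
            cfg += fmt.Sprintf("  corelist-workers %s\n", strings.Join(workers, ","))
        }
        return cfg + "}\n"
    }

    func main() {
        // With the window scheme above, the first VPP instance in parallel
        // process 1 with -cpus=3 would get cores [0 1 2]:
        fmt.Print(cpuConfig([]int{0, 1, 2}))
        // cpu {
        //   main-core 0
        //   corelist-workers 1,2
        // }
    }
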