hs-test: fix CPU alloc when running in parallel

Type: test

Change-Id: I6062eddffb938880d9ec004c8418a9a731891989
Signed-off-by: Adrian Villin <avillin@cisco.com>
Author:    Adrian Villin
Date:      2024-05-22 09:26:47 -04:00
Committer: Florin Coras
Parent:    f5df854389
Commit:    0df582e8ec

7 changed files with 42 additions and 28 deletions


@@ -239,6 +239,7 @@ func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*Vp
vpp := new(VppInstance)
vpp.container = c
vpp.cpus = cpus
c.suite.vppContainerCount += 1
vpp.additionalConfig = append(vpp.additionalConfig, additionalConfigs...)
c.vppInstance = vpp
return vpp, nil
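
The counter added here is read by the CPU allocator before it is incremented, so the first VPP instance a test creates allocates with a count of 0 and the second with 1. A minimal sketch (hypothetical helper, not the hs-test code) of how that count selects disjoint halves of a per-process CPU window:

package main

import "fmt"

// Hypothetical helper, not the hs-test code: pick the half of a per-process
// CPU window that belongs to the n-th VPP container of the current test.
func halfForInstance(window []int, nCpus, vppContainerCount int) []int {
	start := vppContainerCount * nCpus
	return window[start : start+nCpus]
}

func main() {
	window := []int{4, 5, 6, 7} // assumed: 2*nCpus cores reserved for this Ginkgo process
	fmt.Println(halfForInstance(window, 2, 0)) // [4 5] -> first VPP container
	fmt.Println(halfForInstance(window, 2, 1)) // [6 7] -> second VPP container
}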


@@ -4,6 +4,7 @@ import (
"bufio"
"errors"
"fmt"
. "github.com/onsi/ginkgo/v2"
"os"
"os/exec"
"strings"
@@ -16,26 +17,31 @@ type CpuContext struct {
cpus []int
}
func (c *CpuContext) Release() {
c.cpuAllocator.cpus = append(c.cpuAllocator.cpus, c.cpus...)
c.cpus = c.cpus[:0] // empty the list
}
type CpuAllocatorT struct {
cpus []int
}
var cpuAllocator *CpuAllocatorT = nil
func (c *CpuAllocatorT) Allocate(nCpus int) (*CpuContext, error) {
func (c *CpuAllocatorT) Allocate(vppContainerCount int, nCpus int) (*CpuContext, error) {
var cpuCtx CpuContext
if len(c.cpus) < nCpus {
return nil, fmt.Errorf("could not allocate %d CPUs; available: %d", nCpus, len(c.cpus))
maxCpu := GinkgoParallelProcess() * 2 * nCpus
minCpu := (GinkgoParallelProcess() - 1) * 2 * nCpus
if len(c.cpus) < maxCpu {
vppContainerCount += 1
err := fmt.Errorf("could not allocate %d CPUs; available: %d; attempted to allocate cores %d-%d",
nCpus*vppContainerCount, len(c.cpus), minCpu, minCpu+nCpus*vppContainerCount)
return nil, err
}
cpuCtx.cpus = c.cpus[0:nCpus]
if vppContainerCount == 0 {
cpuCtx.cpus = c.cpus[minCpu : maxCpu-nCpus]
} else if vppContainerCount == 1 {
cpuCtx.cpus = c.cpus[minCpu+nCpus : maxCpu]
} else {
return nil, fmt.Errorf("too many VPP containers; CPU allocation for >2 VPP containers is not implemented yet")
}
cpuCtx.cpuAllocator = c
c.cpus = c.cpus[nCpus:]
return &cpuCtx, nil
}
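
For orientation, a small sketch (not part of the commit) of the window arithmetic above: each Ginkgo parallel process owns a window of 2*nCpus entries in the allocator's cpu list, and the two branches hand the lower or upper half of that window to the first or second VPP container of the test.

package main

import "fmt"

// Sketch of the index arithmetic in Allocate (indices into the allocator's
// cpu list, not absolute core IDs); values are illustrative only.
func main() {
	nCpus := 2
	for process := 1; process <= 3; process++ { // stand-in for GinkgoParallelProcess()
		minCpu := (process - 1) * 2 * nCpus
		maxCpu := process * 2 * nCpus
		fmt.Printf("process %d: window [%d:%d), first VPP [%d:%d), second VPP [%d:%d)\n",
			process, minCpu, maxCpu, minCpu, maxCpu-nCpus, minCpu+nCpus, maxCpu)
	}
}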


@@ -21,7 +21,9 @@ func EchoBuiltinTest(s *VethsSuite) {
s.assertNotContains(o, "failed:")
}
// unstable with multiple workers
func TcpWithLossTest(s *VethsSuite) {
s.SkipIfMultiWorker()
serverVpp := s.getContainerByName("server-vpp").vppInstance
serverVeth := s.getInterfaceByName(serverInterfaceName)

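The guard added above keeps a known-flaky case off multi-worker runs. A plausible sketch of such a guard, assuming it keys off the -cpus flag declared in hst_suite.go and skips through Ginkgo (the real SkipIfMultiWorker may differ):

package hst

import ginkgo "github.com/onsi/ginkgo/v2"

// Plausible sketch only; assumes the guard checks the configured worker
// count and skips the current spec via Ginkgo when it is above one.
func skipIfMultiWorker(nConfiguredCpus int) {
	if nConfiguredCpus > 1 {
		ginkgo.Skip("test is unstable with multiple VPP workers")
	}
}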

@@ -30,18 +30,19 @@ var nConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
var vppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
type HstSuite struct {
containers map[string]*Container
volumes []string
netConfigs []NetConfig
netInterfaces map[string]*NetInterface
ip4AddrAllocator *Ip4AddressAllocator
testIds map[string]string
cpuAllocator *CpuAllocatorT
cpuContexts []*CpuContext
cpuPerVpp int
pid string
logger *log.Logger
logFile *os.File
containers map[string]*Container
vppContainerCount int
volumes []string
netConfigs []NetConfig
netInterfaces map[string]*NetInterface
ip4AddrAllocator *Ip4AddressAllocator
testIds map[string]string
cpuAllocator *CpuAllocatorT
cpuContexts []*CpuContext
cpuPerVpp int
pid string
logger *log.Logger
logFile *os.File
}
func (s *HstSuite) SetupSuite() {
@@ -61,7 +62,7 @@ func (s *HstSuite) SetupSuite() {
}
func (s *HstSuite) AllocateCpus() []int {
cpuCtx, err := s.cpuAllocator.Allocate(s.cpuPerVpp)
cpuCtx, err := s.cpuAllocator.Allocate(s.vppContainerCount, s.cpuPerVpp)
s.assertNil(err)
s.AddCpuContext(cpuCtx)
return cpuCtx.cpus
@@ -82,9 +83,6 @@ func (s *HstSuite) TearDownTest() {
if *isPersistent {
return
}
for _, c := range s.cpuContexts {
c.Release()
}
s.resetContainers()
s.removeVolumes()
s.ip4AddrAllocator.deleteIpAddresses()
@@ -98,6 +96,7 @@ func (s *HstSuite) skipIfUnconfiguring() {
func (s *HstSuite) SetupTest() {
s.log("Test Setup")
s.vppContainerCount = 0
s.skipIfUnconfiguring()
s.setupVolumes()
s.setupContainers()
@@ -157,7 +156,7 @@ func (s *HstSuite) hstFail() {
for _, container := range s.containers {
out, err := container.log(20)
if err != nil {
fmt.Printf("An error occured while obtaining '%s' container logs: %s\n", container.name, fmt.Sprint(err))
s.log("An error occured while obtaining '" + container.name + "' container logs: " + fmt.Sprint(err))
continue
}
s.log("\nvvvvvvvvvvvvvvv " +

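Putting the suite-level pieces together: SetupTest now resets vppContainerCount, and AllocateCpus forwards the current value to the allocator. A minimal sketch of that flow with stand-in types (not the hs-test code):

package main

import "fmt"

// Stand-in allocator that only records what it was asked for.
type recordingAllocator struct{ calls [][2]int }

func (a *recordingAllocator) Allocate(vppContainerCount, nCpus int) {
	a.calls = append(a.calls, [2]int{vppContainerCount, nCpus})
}

type suiteSketch struct {
	vppContainerCount int
	cpuPerVpp         int
	allocator         *recordingAllocator
}

func (s *suiteSketch) SetupTest() { s.vppContainerCount = 0 } // mirrors the reset above

func (s *suiteSketch) AllocateCpus() {
	s.allocator.Allocate(s.vppContainerCount, s.cpuPerVpp)
	s.vppContainerCount++ // done by newVppInstance in the real code
}

func main() {
	s := &suiteSketch{cpuPerVpp: 2, allocator: &recordingAllocator{}}
	s.SetupTest()
	s.AllocateCpus() // first VPP container of the test
	s.AllocateCpus() // second VPP container of the test
	fmt.Println(s.allocator.calls) // [[0 2] [1 2]]
}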

@@ -272,7 +272,9 @@ func runNginxPerf(s *NoTopoSuite, mode, ab_or_wrk string) error {
return nil
}
// unstable with multiple workers
func NginxPerfCpsTest(s *NoTopoSuite) {
s.SkipIfMultiWorker()
s.assertNil(runNginxPerf(s, "cps", "ab"))
}


@@ -8,7 +8,9 @@ func init() {
registerNginxTests(MirroringTest)
}
// broken when CPUS > 1
func MirroringTest(s *NginxSuite) {
s.SkipIfMultiWorker()
proxyAddress := s.getInterfaceByName(mirroringClientInterfaceName).peer.ip4AddressString()
path := "/64B.json"


@@ -456,6 +456,7 @@ func (vpp *VppInstance) generateCpuConfig() string {
}
c.newStanza("cpu").
append(fmt.Sprintf("main-core %d", vpp.cpus[0]))
vpp.getSuite().log(fmt.Sprintf("main-core %d", vpp.cpus[0]))
workers := vpp.cpus[1:]
if len(workers) > 0 {
@@ -466,6 +467,7 @@ func (vpp *VppInstance) generateCpuConfig() string {
s = s + fmt.Sprintf("%d", workers[i])
}
c.append(fmt.Sprintf("corelist-workers %s", s))
vpp.getSuite().log("corelist-workers " + s)
}
return c.close().toString()
}
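
The two log lines added here print the same values that end up in the generated cpu stanza. For reference, the stanza emitted by this function has the shape of a standard VPP startup.conf cpu section (core numbers are illustrative only):

cpu {
  main-core 4
  corelist-workers 5,6
}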