hs-test: Add CPU pinning test suite

Type: test

Added a suite to verify that VPP launches with the
provided CPU pinning configuration. The CPU
configuration is specified per-test.

Change-Id: Ic283339676d3b24636fc21156a09a192c1a8d8da
Signed-off-by: Hadi Rayan Al-Sandid <halsandi@cisco.com>
Author:    Hadi Rayan Al-Sandid
Date:      2024-06-24 10:28:58 +02:00
Committer: Florin Coras
Parent:    c44fa9355b
Commit:    e0e85134ad

6 changed files with 201 additions and 12 deletions


@@ -0,0 +1,30 @@
package main

import (
	. "fd.io/hs-test/infra"
)

func init() {
	RegisterCpuPinningSoloTests(DefaultCpuConfigurationTest, SkipCoresTest)
}

// TODO: Add more CPU configuration tests
func DefaultCpuConfigurationTest(s *CpuPinningSuite) {
	vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
	s.AssertNil(vpp.Start())
}

func SkipCoresTest(s *CpuPinningSuite) {
	skipCoresConfiguration := VppCpuConfig{
		PinMainCpu:         true,
		PinWorkersCorelist: true,
		SkipCores:          1,
	}

	vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
	vpp.CpuConfig = skipCoresConfiguration

	s.AssertNil(vpp.Start())
}
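For comparison, a non-solo test exercising the "workers N" form could be registered through RegisterCpuPinningTests the same way. The sketch below is hypothetical (this test is not part of the commit); it only reuses the VppCpuConfig fields and suite helpers shown elsewhere in this diff:

// Hypothetical example, not in this commit: pin the main core only and
// let VPP size the worker pool with a bare "workers N" stanza instead of
// an explicit corelist. It would be registered in init() with:
//     RegisterCpuPinningTests(WorkersCountTest)
func WorkersCountTest(s *CpuPinningSuite) {
	workersCountConfiguration := VppCpuConfig{
		PinMainCpu:         true,
		PinWorkersCorelist: false, // generateVPPCpuConfig emits "workers N"
		SkipCores:          0,
	}
	vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
	vpp.CpuConfig = workersCountConfiguration
	s.AssertNil(vpp.Start())
}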


@@ -249,6 +249,7 @@ func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*VppInstance, error) {
 	vpp := new(VppInstance)
 	vpp.Container = c
 	vpp.Cpus = cpus
+	vpp.setDefaultCpuConfig()
 	vpp.AdditionalConfig = append(vpp.AdditionalConfig, additionalConfigs...)
 	c.VppInstance = vpp
 	return vpp, nil


@@ -247,6 +247,16 @@ func (s *HstSuite) SkipIfMultiWorker(args ...any) {
 	}
 }
 
+func (s *HstSuite) SkipIfNotEnoughAvailableCpus(containerCount int, nCpus int) bool {
+	MaxRequestedCpu := (GinkgoParallelProcess() * containerCount * nCpus)
+
+	if len(s.CpuAllocator.cpus)-1 < MaxRequestedCpu {
+		s.Skip(fmt.Sprintf("test case cannot allocate requested cpus (%d cpus * %d containers)", nCpus, containerCount))
+	}
+
+	return true
+}
+
 func (s *HstSuite) SkipUnlessExtendedTestsBuilt() {
 	imageName := "hs-test/nginx-http3"

@@ -0,0 +1,101 @@
package hst

import (
	"fmt"
	. "github.com/onsi/ginkgo/v2"
	"reflect"
	"runtime"
	"strings"
)

var cpuPinningTests = map[string][]func(s *CpuPinningSuite){}
var cpuPinningSoloTests = map[string][]func(s *CpuPinningSuite){}

type CpuPinningSuite struct {
	HstSuite
}

func RegisterCpuPinningTests(tests ...func(s *CpuPinningSuite)) {
	cpuPinningTests[getTestFilename()] = tests
}

func RegisterCpuPinningSoloTests(tests ...func(s *CpuPinningSuite)) {
	cpuPinningSoloTests[getTestFilename()] = tests
}

func (s *CpuPinningSuite) SetupSuite() {
	s.HstSuite.SetupSuite()
	s.LoadNetworkTopology("tap")
	s.LoadContainerTopology("singleCpuPinning")
}

func (s *CpuPinningSuite) SetupTest() {
	// Skip if we cannot allocate 3 CPUs for test container
	s.SkipIfNotEnoughAvailableCpus(1, 3)

	s.CpuPerVpp = 3
	s.HstSuite.SetupTest()
	container := s.GetContainerByName(SingleTopoContainerVpp)
	vpp, err := container.newVppInstance(container.AllocatedCpus)
	s.AssertNotNil(vpp, fmt.Sprint(err))
}

var _ = Describe("CpuPinningSuite", Ordered, ContinueOnFailure, func() {
	var s CpuPinningSuite
	BeforeAll(func() {
		s.SetupSuite()
	})
	BeforeEach(func() {
		s.SetupTest()
	})
	AfterAll(func() {
		s.TearDownSuite()
	})
	AfterEach(func() {
		s.TearDownTest()
	})

	// https://onsi.github.io/ginkgo/#dynamically-generating-specs
	for filename, tests := range cpuPinningTests {
		for _, test := range tests {
			test := test
			pc := reflect.ValueOf(test).Pointer()
			funcValue := runtime.FuncForPC(pc)
			testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
			It(testName, func(ctx SpecContext) {
				s.Log(testName + ": BEGIN")
				test(&s)
			}, SpecTimeout(SuiteTimeout))
		}
	}
})

var _ = Describe("CpuPinningSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
	var s CpuPinningSuite
	BeforeAll(func() {
		s.SetupSuite()
	})
	BeforeEach(func() {
		s.SetupTest()
	})
	AfterAll(func() {
		s.TearDownSuite()
	})
	AfterEach(func() {
		s.TearDownTest()
	})

	for filename, tests := range cpuPinningSoloTests {
		for _, test := range tests {
			test := test
			pc := reflect.ValueOf(test).Pointer()
			funcValue := runtime.FuncForPC(pc)
			testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
			It(testName, Label("SOLO"), func(ctx SpecContext) {
				s.Log(testName + ": BEGIN")
				test(&s)
			}, SpecTimeout(SuiteTimeout))
		}
	}
})
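Note on the split: the solo variant carries Ginkgo's Serial decorator and a SOLO label, so these specs never run concurrently with other parallel specs; presumably this keeps the pinned cores from being contended while the launch assertion runs.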


@@ -88,6 +88,13 @@ type VppInstance struct {
 	Connection *core.Connection
 	ApiStream  api.Stream
 	Cpus       []int
+	CpuConfig  VppCpuConfig
 }
+
+type VppCpuConfig struct {
+	PinMainCpu         bool
+	PinWorkersCorelist bool
+	SkipCores          int
+}
 
 func (vpp *VppInstance) getSuite() *HstSuite {
@@ -131,7 +138,7 @@ func (vpp *VppInstance) Start() error {
 		defaultApiSocketFilePath,
 		defaultLogFilePath,
 	)
-	configContent += vpp.generateCpuConfig()
+	configContent += vpp.generateVPPCpuConfig()
 	for _, c := range vpp.AdditionalConfig {
 		configContent += c.ToString()
 	}
@@ -476,18 +483,41 @@ func (vpp *VppInstance) Disconnect() {
 	vpp.ApiStream.Close()
 }
 
-func (vpp *VppInstance) generateCpuConfig() string {
+func (vpp *VppInstance) setDefaultCpuConfig() {
+	vpp.CpuConfig.PinMainCpu = true
+	vpp.CpuConfig.PinWorkersCorelist = true
+	vpp.CpuConfig.SkipCores = 0
+}
+
+func (vpp *VppInstance) generateVPPCpuConfig() string {
 	var c Stanza
 	var s string
+	startCpu := 0
 	if len(vpp.Cpus) < 1 {
 		return ""
 	}
-	c.NewStanza("cpu").
-		Append(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
-	vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
-	workers := vpp.Cpus[1:]
+
+	c.NewStanza("cpu")
+
+	// If skip-cores is valid, use as start value to assign main/workers CPUs
+	if vpp.CpuConfig.SkipCores != 0 {
+		c.Append(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+		vpp.getSuite().Log(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+	}
+
+	if len(vpp.Cpus) > vpp.CpuConfig.SkipCores {
+		startCpu = vpp.CpuConfig.SkipCores
+	}
+
+	if vpp.CpuConfig.PinMainCpu {
+		c.Append(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+		vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+	}
+
+	workers := vpp.Cpus[startCpu+1:]
+
 	if len(workers) > 0 {
+		if vpp.CpuConfig.PinWorkersCorelist {
 			for i := 0; i < len(workers); i++ {
 				if i != 0 {
 					s = s + ", "
@@ -496,6 +526,12 @@ func (vpp *VppInstance) generateCpuConfig() string {
 			}
 			c.Append(fmt.Sprintf("corelist-workers %s", s))
 			vpp.getSuite().Log("corelist-workers " + s)
+		} else {
+			s = fmt.Sprintf("%d", len(workers))
+			c.Append(fmt.Sprintf("workers %s", s))
+			vpp.getSuite().Log("workers " + s)
+		}
 	}
 
 	return c.Close().ToString()
 }
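For illustration (not part of the diff): with allocated CPUs [2, 3, 4, 5] and the SkipCoresTest configuration above (SkipCores: 1, both pin flags true), startCpu resolves to 1 and the generated stanza would look roughly as follows, assuming Stanza renders each Append as a line inside a brace-delimited block:

cpu {
  skip-cores 1
  main-core 3
  corelist-workers 4, 5
}

With PinWorkersCorelist set to false, the last entry would instead take the count form, workers 2.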


@@ -0,0 +1,11 @@
---
volumes:
- volume: &shared-vol
host-dir: "$HST_VOLUME_DIR/shared-vol"
containers:
- name: "vpp"
volumes:
- <<: *shared-vol
container-dir: "/tmp/vpp"
is-default-work-dir: true
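The container is named "vpp" in this topology, which is presumably what the SingleTopoContainerVpp constant passed to GetContainerByName resolves to; the constant itself is defined elsewhere in the infra package and is not part of this diff.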