hs-test: Add CPU pinning test suite
Type: test

Added suite to verify that VPP launches with provided CPU pinning
configurations. CPU configuration is specified per-test.

Change-Id: Ic283339676d3b24636fc21156a09a192c1a8d8da
Signed-off-by: Hadi Rayan Al-Sandid <halsandi@cisco.com>
committed by Florin Coras
parent c44fa9355b
commit e0e85134ad

extras/hs-test/cpu_pinning_test.go (new file, 30 lines)
@@ -0,0 +1,30 @@
+package main
+
+import (
+    . "fd.io/hs-test/infra"
+)
+
+func init() {
+    RegisterCpuPinningSoloTests(DefaultCpuConfigurationTest, SkipCoresTest)
+}
+
+// TODO: Add more CPU configuration tests
+
+func DefaultCpuConfigurationTest(s *CpuPinningSuite) {
+    vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
+    s.AssertNil(vpp.Start())
+}
+
+func SkipCoresTest(s *CpuPinningSuite) {
+
+    skipCoresConfiguration := VppCpuConfig{
+        PinMainCpu:         true,
+        PinWorkersCorelist: true,
+        SkipCores:          1,
+    }
+
+    vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
+    vpp.CpuConfig = skipCoresConfiguration
+
+    s.AssertNil(vpp.Start())
+}
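The TODO above invites further configurations. As a rough sketch (not part of this change), a non-solo test could be registered through RegisterCpuPinningTests and exercise the unpinned-workers branch of the generator; the test name WorkersUnpinnedTest and its configuration values below are illustrative only.

package main

import . "fd.io/hs-test/infra"

func init() {
    // Hypothetical registration; RegisterCpuPinningTests is the non-solo variant.
    RegisterCpuPinningTests(WorkersUnpinnedTest)
}

// WorkersUnpinnedTest is an illustrative test: main core pinned, workers
// expressed as a plain "workers N" count rather than a corelist.
func WorkersUnpinnedTest(s *CpuPinningSuite) {
    vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
    vpp.CpuConfig = VppCpuConfig{
        PinMainCpu:         true,
        PinWorkersCorelist: false, // exercises the "workers %d" branch
        SkipCores:          0,
    }
    s.AssertNil(vpp.Start())
}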
@@ -249,6 +249,7 @@ func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*Vp
     vpp := new(VppInstance)
     vpp.Container = c
     vpp.Cpus = cpus
+    vpp.setDefaultCpuConfig()
     vpp.AdditionalConfig = append(vpp.AdditionalConfig, additionalConfigs...)
     c.VppInstance = vpp
     return vpp, nil
@@ -247,6 +247,16 @@ func (s *HstSuite) SkipIfMultiWorker(args ...any) {
     }
 }
 
+func (s *HstSuite) SkipIfNotEnoughAvailableCpus(containerCount int, nCpus int) bool {
+    MaxRequestedCpu := (GinkgoParallelProcess() * containerCount * nCpus)
+
+    if len(s.CpuAllocator.cpus)-1 < MaxRequestedCpu {
+        s.Skip(fmt.Sprintf("test case cannot allocate requested cpus (%d cpus * %d containers)", nCpus, containerCount))
+    }
+
+    return true
+}
+
 func (s *HstSuite) SkipUnlessExtendedTestsBuilt() {
     imageName := "hs-test/nginx-http3"
 
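For reference, the capacity check above multiplies the Ginkgo worker index by the number of containers and the CPUs per container. A standalone sketch with made-up numbers (GinkgoParallelProcess() and the CPU allocator replaced by plain variables) shows the arithmetic:

package main

import "fmt"

func main() {
    parallelProcess := 2 // stand-in for GinkgoParallelProcess()
    containerCount := 1
    nCpus := 3
    availableCpus := 8 // stand-in for len(s.CpuAllocator.cpus)

    maxRequestedCpu := parallelProcess * containerCount * nCpus // 6
    if availableCpus-1 < maxRequestedCpu {
        fmt.Printf("skip: cannot allocate %d cpus * %d containers\n", nCpus, containerCount)
        return
    }
    fmt.Println("enough CPUs available, run the test")
}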
extras/hs-test/infra/suite_cpu_pinning.go (new file, 101 lines)
@@ -0,0 +1,101 @@
+package hst
+
+import (
+    "fmt"
+    . "github.com/onsi/ginkgo/v2"
+    "reflect"
+    "runtime"
+    "strings"
+)
+
+var cpuPinningTests = map[string][]func(s *CpuPinningSuite){}
+var cpuPinningSoloTests = map[string][]func(s *CpuPinningSuite){}
+
+type CpuPinningSuite struct {
+    HstSuite
+}
+
+func RegisterCpuPinningTests(tests ...func(s *CpuPinningSuite)) {
+    cpuPinningTests[getTestFilename()] = tests
+}
+
+func RegisterCpuPinningSoloTests(tests ...func(s *CpuPinningSuite)) {
+    cpuPinningSoloTests[getTestFilename()] = tests
+}
+
+func (s *CpuPinningSuite) SetupSuite() {
+    s.HstSuite.SetupSuite()
+    s.LoadNetworkTopology("tap")
+    s.LoadContainerTopology("singleCpuPinning")
+}
+
+func (s *CpuPinningSuite) SetupTest() {
+    // Skip if we cannot allocate 3 CPUs for test container
+    s.SkipIfNotEnoughAvailableCpus(1, 3)
+    s.CpuPerVpp = 3
+    s.HstSuite.SetupTest()
+    container := s.GetContainerByName(SingleTopoContainerVpp)
+    vpp, err := container.newVppInstance(container.AllocatedCpus)
+    s.AssertNotNil(vpp, fmt.Sprint(err))
+}
+
+var _ = Describe("CpuPinningSuite", Ordered, ContinueOnFailure, func() {
+    var s CpuPinningSuite
+    BeforeAll(func() {
+        s.SetupSuite()
+    })
+    BeforeEach(func() {
+        s.SetupTest()
+    })
+    AfterAll(func() {
+        s.TearDownSuite()
+    })
+    AfterEach(func() {
+        s.TearDownTest()
+    })
+
+    // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+    for filename, tests := range cpuPinningTests {
+        for _, test := range tests {
+            test := test
+            pc := reflect.ValueOf(test).Pointer()
+            funcValue := runtime.FuncForPC(pc)
+            testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+            It(testName, func(ctx SpecContext) {
+                s.Log(testName + ": BEGIN")
+                test(&s)
+            }, SpecTimeout(SuiteTimeout))
+        }
+    }
+})
+
+var _ = Describe("CpuPinningSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+    var s CpuPinningSuite
+    BeforeAll(func() {
+        s.SetupSuite()
+    })
+    BeforeEach(func() {
+        s.SetupTest()
+    })
+    AfterAll(func() {
+        s.TearDownSuite()
+    })
+    AfterEach(func() {
+        s.TearDownTest()
+    })
+
+    for filename, tests := range cpuPinningSoloTests {
+        for _, test := range tests {
+            test := test
+            pc := reflect.ValueOf(test).Pointer()
+            funcValue := runtime.FuncForPC(pc)
+            testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+            It(testName, Label("SOLO"), func(ctx SpecContext) {
+                s.Log(testName + ": BEGIN")
+                test(&s)
+            }, SpecTimeout(SuiteTimeout))
+        }
+    }
+})
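The spec names above are derived from the registered function pointers via reflect and runtime. A small standalone sketch of that derivation follows; the example function is made up, and the suite indexes element [2] presumably because the fully qualified name under the fd.io/hs-test module contains dots from the import path.

package main

import (
    "fmt"
    "reflect"
    "runtime"
    "strings"
)

func DefaultCpuConfigurationTest() {}

func main() {
    pc := reflect.ValueOf(DefaultCpuConfigurationTest).Pointer()
    full := runtime.FuncForPC(pc).Name() // e.g. "main.DefaultCpuConfigurationTest"
    parts := strings.Split(full, ".")
    fmt.Println(parts[len(parts)-1]) // bare function name used in the spec title
}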
@@ -88,6 +88,13 @@ type VppInstance struct {
     Connection *core.Connection
     ApiStream  api.Stream
     Cpus       []int
+    CpuConfig  VppCpuConfig
+}
+
+type VppCpuConfig struct {
+    PinMainCpu         bool
+    PinWorkersCorelist bool
+    SkipCores          int
 }
 
 func (vpp *VppInstance) getSuite() *HstSuite {
@@ -131,7 +138,7 @@ func (vpp *VppInstance) Start() error {
         defaultApiSocketFilePath,
         defaultLogFilePath,
     )
-    configContent += vpp.generateCpuConfig()
+    configContent += vpp.generateVPPCpuConfig()
     for _, c := range vpp.AdditionalConfig {
         configContent += c.ToString()
     }
@@ -476,18 +483,41 @@ func (vpp *VppInstance) Disconnect() {
     vpp.ApiStream.Close()
 }
 
-func (vpp *VppInstance) generateCpuConfig() string {
+func (vpp *VppInstance) setDefaultCpuConfig() {
+    vpp.CpuConfig.PinMainCpu = true
+    vpp.CpuConfig.PinWorkersCorelist = true
+    vpp.CpuConfig.SkipCores = 0
+}
+
+func (vpp *VppInstance) generateVPPCpuConfig() string {
     var c Stanza
     var s string
+    startCpu := 0
     if len(vpp.Cpus) < 1 {
         return ""
     }
-    c.NewStanza("cpu").
-        Append(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
-    vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
-    workers := vpp.Cpus[1:]
+
+    c.NewStanza("cpu")
+
+    // If skip-cores is valid, use as start value to assign main/workers CPUs
+    if vpp.CpuConfig.SkipCores != 0 {
+        c.Append(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+        vpp.getSuite().Log(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+    }
+
+    if len(vpp.Cpus) > vpp.CpuConfig.SkipCores {
+        startCpu = vpp.CpuConfig.SkipCores
+    }
+
+    if vpp.CpuConfig.PinMainCpu {
+        c.Append(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+        vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+    }
+
+    workers := vpp.Cpus[startCpu+1:]
+
     if len(workers) > 0 {
+        if vpp.CpuConfig.PinWorkersCorelist {
         for i := 0; i < len(workers); i++ {
             if i != 0 {
                 s = s + ", "
@@ -496,6 +526,12 @@ func (vpp *VppInstance) generateCpuConfig() string {
             }
         c.Append(fmt.Sprintf("corelist-workers %s", s))
         vpp.getSuite().Log("corelist-workers " + s)
+        } else {
+            s = fmt.Sprintf("%d", len(workers))
+            c.Append(fmt.Sprintf("workers %s", s))
+            vpp.getSuite().Log("workers " + s)
+        }
     }
+
     return c.Close().ToString()
 }
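Taken together, the new generator emits a cpu stanza driven by VppCpuConfig. Below is a rough standalone sketch of the output expected for the SkipCoresTest configuration, assuming the container was allocated CPUs 4, 5 and 6; the exact whitespace produced by Stanza is an assumption, and the code only mirrors the generator's logic outside the test framework.

package main

import (
    "fmt"
    "strings"
)

func main() {
    cpus := []int{4, 5, 6} // assumed allocation
    skipCores := 1
    pinMain, pinCorelist := true, true

    startCpu := 0
    if len(cpus) > skipCores {
        startCpu = skipCores
    }

    lines := []string{"cpu {"}
    if skipCores != 0 {
        lines = append(lines, fmt.Sprintf("  skip-cores %d", skipCores))
    }
    if pinMain {
        lines = append(lines, fmt.Sprintf("  main-core %d", cpus[startCpu]))
    }
    workers := cpus[startCpu+1:]
    if len(workers) > 0 {
        if pinCorelist {
            var list []string
            for _, w := range workers {
                list = append(list, fmt.Sprintf("%d", w))
            }
            lines = append(lines, "  corelist-workers "+strings.Join(list, ", "))
        } else {
            lines = append(lines, fmt.Sprintf("  workers %d", len(workers)))
        }
    }
    lines = append(lines, "}")
    fmt.Println(strings.Join(lines, "\n"))
    // Expected output (approximate):
    // cpu {
    //   skip-cores 1
    //   main-core 5
    //   corelist-workers 6
    // }
}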
extras/hs-test/topo-containers/singleCpuPinning.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+volumes:
+  - volume: &shared-vol
+      host-dir: "$HST_VOLUME_DIR/shared-vol"
+
+containers:
+  - name: "vpp"
+    volumes:
+      - <<: *shared-vol
+        container-dir: "/tmp/vpp"
+        is-default-work-dir: true