hs-test: added multithreaded vpp proxy tests

- if a test is named '...MTTest', 3 cpus will be allocated to vpp
- updated docs

Type: test

Change-Id: I756dfb6cdbff4368d606ca3abbc1a510cd1d6b51
Signed-off-by: Adrian Villin <avillin@cisco.com>
Author: Adrian Villin
Date: 2024-11-20 11:11:35 +01:00
Committed by: Florin Coras
Parent: c990aae85a
Commit: d05f16d124
6 changed files with 67 additions and 16 deletions


@@ -68,6 +68,10 @@ For adding a new suite, please see `Modifying the framework`_ below.
Two Docker containers are assumed, each running its own VPP instance. One VPP then pings the other.
This can be put in file ``extras/hs-test/my_test.go`` and run with command ``make test TEST=MyTest``.
To add a multi-worker test, name it ``[name]MTTest``. The framework will then allocate 3 CPUs to the VPP container, regardless of what ``CPUS`` is set to.
Only a single multi-worker VPP container is supported for now. Please register multi-worker tests as Solo tests to avoid reusing the same cores
when running in parallel.
::
package main
@@ -77,7 +81,12 @@ This can be put in file ``extras/hs-test/my_test.go`` and run with command ``mak
)
func init(){
RegisterMySuiteTest(MyTest)
RegisterMySuiteTests(MyTest)
RegisterSoloMySuiteTests(MyMTTest)
}
func MyMTTest(s *MySuite){
MyTest(s)
}
func MyTest(s *MySuite) {
@@ -86,8 +95,8 @@ This can be put in file ``extras/hs-test/my_test.go`` and run with command ``mak
serverVethAddress := s.NetInterfaces["server-iface"].Ip4AddressString()
result := clientVpp.Vppctl("ping " + serverVethAddress)
s.Log(result)
s.AssertNotNil(result)
s.Log(result)
}
@@ -100,6 +109,7 @@ The framework allows us to filter test cases in a few different ways, using ``ma
* File name
* Test name
* All of the above as long as they are ordered properly, e.g. ``make test TEST=VethsSuite.http_test.go.HeaderServerTest``
* Multiple tests/suites: ``make test TEST=HttpClient,LdpSuite``
**Names are case sensitive!**
@@ -308,6 +318,12 @@ or a new version incompatibility issue occurs.
Debugging a test
----------------
DRYRUN
^^^^^^
``make test TEST=[name] DRYRUN=true`` will set up and start most of the containers, but won't run any tests or start VPP. VPP and its interfaces will be
configured automatically once you start VPP with the generated startup.conf file.
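For example, to prepare the environment for one of the multi-worker proxy tests added in this change without running it:
::
    make test TEST=VppProxyHttpGetTcpMTTest DRYRUN=true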
GDB
^^^


@@ -214,7 +214,7 @@ func (c *Container) Create() error {
func (c *Container) allocateCpus() {
c.Suite.StartedContainers = append(c.Suite.StartedContainers, c)
c.AllocatedCpus = c.Suite.AllocateCpus()
c.AllocatedCpus = c.Suite.AllocateCpus(c.Name)
c.Suite.Log("Allocated CPUs " + fmt.Sprint(c.AllocatedCpus) + " to container " + c.Name)
}


@@ -35,17 +35,17 @@ func iterateAndAppend(start int, end int, slice []int) []int {
var cpuAllocator *CpuAllocatorT = nil
func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int) (*CpuContext, error) {
func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*CpuContext, error) {
var cpuCtx CpuContext
// indexes, not actual cores
var minCpu, maxCpu int
if c.runningInCi {
minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus)
maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1
minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus) + offset
maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1 + offset
} else {
minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus)
maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1
minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
}
if len(c.cpus)-1 < maxCpu {
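As an aside, the cpu-index window computed above can be illustrated with a small standalone sketch; the helper name and the sample values below are illustrative assumptions, not part of this change:

package main

import "fmt"

// cpuRange mirrors the non-CI branch above: the window of cpu indexes a Ginkgo
// parallel process may draw from, shifted by the new offset parameter.
func cpuRange(process, maxContainerCount, nCpus, offset int) (minCpu, maxCpu int) {
	minCpu = (process-1)*maxContainerCount*nCpus + offset
	maxCpu = process*maxContainerCount*nCpus - 1 + offset
	return
}

func main() {
	// Sample values: 2nd parallel process, maxContainerCount=4, 2 cpus per container.
	fmt.Println(cpuRange(2, 4, 2, 0)) // 8 15 (no offset)
	fmt.Println(cpuRange(2, 4, 2, 2)) // 10 17 (shifted by an offset of 2)
}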


@@ -135,12 +135,38 @@ func (s *HstSuite) SetupSuite() {
s.CpuCount = *NConfiguredCpus
}
func (s *HstSuite) AllocateCpus() []int {
cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount)
// using Fail instead of AssertNil to make error message more readable
if err != nil {
Fail(fmt.Sprint(err))
func (s *HstSuite) AllocateCpus(containerName string) []int {
var cpuCtx *CpuContext
var err error
currentTestName := CurrentSpecReport().LeafNodeText
if strings.Contains(currentTestName, "MTTest") {
prevContainerCount := s.CpuAllocator.maxContainerCount
if strings.Contains(containerName, "vpp") {
// CPU range is assigned based on the Ginkgo process index (or build number if
// running in the CI), *NConfiguredCpus and a maxContainerCount.
// maxContainerCount is set to 4 when CpuAllocator is initialized.
// 4 is not a random number - all of our suites use a maximum of 4 containers simultaneously,
// and it's also the maximum number of containers we can run with *NConfiguredCpus=2 (with CPU0=true)
// on processors with 8 threads. Currently, the CpuAllocator puts all cores into a slice,
// makes the length of the slice divisible by 4x*NConfiguredCpus, and then the minCpu and
// maxCpu (range) for each container is calculated. Then we just offset based on minCpu,
// the number of started containers and *NConfiguredCpus. This way, every container
// uses the correct CPUs, even if multiple NUMA nodes are available.
// However, because of this, if we want to assign a different number of cores to different containers,
// we have to change maxContainerCount to manipulate the CPU range. Hopefully this is a temporary workaround.
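// A hedged worked example (values assumed for illustration, not taken from this change):
// with *NConfiguredCpus=2, maxContainerCount=4 and Ginkgo parallel process 1, a regular
// allocation draws from cpu indexes (1-1)*4*2 = 0 through 1*4*2-1 = 7. In the MTTest case
// below, the vpp container's window is computed with maxContainerCount=1 and nCpus=3
// (indexes 0..2 here), while the remaining containers' window uses maxContainerCount=3,
// nCpus=*NConfiguredCpus and an offset of 2 (indexes 2..7 here).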
s.CpuAllocator.maxContainerCount = 1
cpuCtx, err = s.CpuAllocator.Allocate(1, 3, 0)
} else {
s.CpuAllocator.maxContainerCount = 3
cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 2)
}
s.CpuAllocator.maxContainerCount = prevContainerCount
} else {
cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 0)
}
s.AssertNil(err)
s.AddCpuContext(cpuCtx)
return cpuCtx.cpus
}
@@ -369,8 +395,8 @@ func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
if availableCpus < maxRequestedCpu {
s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
"(%d cpus * %d containers, %d available). Try using 'CPU0=true'",
s.CpuCount, s.CpuAllocator.maxContainerCount, availableCpus))
"(%d containers * %d cpus, %d available). Try using 'CPU0=true'",
s.CpuAllocator.maxContainerCount, s.CpuCount, availableCpus))
}
}


@@ -189,7 +189,7 @@ var _ = Describe("VppProxySuite", Ordered, ContinueOnFailure, func() {
}
})
var _ = Describe("VppProxySuiteSolo", Ordered, ContinueOnFailure, func() {
var _ = Describe("VppProxySuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
var s VppProxySuite
BeforeAll(func() {
s.SetupSuite()


@@ -9,6 +9,7 @@ import (
func init() {
RegisterVppProxyTests(VppProxyHttpGetTcpTest, VppProxyHttpGetTlsTest, VppProxyHttpPutTcpTest, VppProxyHttpPutTlsTest,
VppConnectProxyGetTest, VppConnectProxyPutTest)
RegisterVppProxySoloTests(VppProxyHttpGetTcpMTTest, VppProxyHttpPutTcpMTTest)
RegisterVppUdpProxyTests(VppProxyUdpTest)
RegisterEnvoyProxyTests(EnvoyProxyHttpGetTcpTest, EnvoyProxyHttpPutTcpTest)
RegisterNginxProxyTests(NginxMirroringTest)
@@ -25,6 +26,10 @@ func configureVppProxy(s *VppProxySuite, proto string, proxyPort uint16) {
s.Log("proxy configured: " + output)
}
func VppProxyHttpGetTcpMTTest(s *VppProxySuite) {
VppProxyHttpGetTcpTest(s)
}
func VppProxyHttpGetTcpTest(s *VppProxySuite) {
var proxyPort uint16 = 8080
configureVppProxy(s, "tcp", proxyPort)
@@ -39,6 +44,10 @@ func VppProxyHttpGetTlsTest(s *VppProxySuite) {
s.CurlDownloadResource(uri)
}
func VppProxyHttpPutTcpMTTest(s *VppProxySuite) {
VppProxyHttpPutTcpTest(s)
}
func VppProxyHttpPutTcpTest(s *VppProxySuite) {
var proxyPort uint16 = 8080
configureVppProxy(s, "tcp", proxyPort)