hs-test: added dry run mode

- DRYRUN=true sets up most containers; some (curl, nginx, ...) still have
  to be started manually. The framework writes a VPP CLI config file with
  the interface configs, which VPP executes on startup (see the sketch below).
- Ginkgo now uses -v instead of -vv when running a single test
- s.Log() now supports fmt-style formatting
- added 'useEnvVars' parameter to container.Exec and container.ExecServer
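
A rough usage sketch: run 'make test TEST=<test-name> DRYRUN=true' (the test
name is a placeholder). After containers and configs are prepared, the suites
skip the test body with a guard like the one added to SetupTest below:

    if *DryRun {
        s.LogStartedContainers()
        s.Skip("Dry run mode = true")
    }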

Type: test

Change-Id: Id1da7947a1448ee4b74b86cc4f243442256a5ba8
Signed-off-by: Adrian Villin <avillin@cisco.com>
Authored by Adrian Villin on 2024-09-25 14:49:11 +02:00
Committed by Florin Coras
Commit 2acdf1e629 (parent 77ca487742)
18 changed files with 299 additions and 132 deletions

View File

@ -86,6 +86,7 @@ help:
@echo " PARALLEL=[n-cpus] - number of test processes to spawn to run in parallel"
@echo " REPEAT=[n] - repeat tests up to N times or until a failure occurs"
@echo " CPU0=[true|false] - use cpu0"
@echo " DRYRUN=[true|false] - set up containers but don't run tests"
@echo
@echo "List of all tests:"
@$(MAKE) list-tests
@ -120,7 +121,8 @@ build-vpp-gcov:
test: .deps.ok .build.ok
@bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
--unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS) \
--vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --cpu0=$(CPU0); \
--vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --cpu0=$(CPU0) \
--dryrun=$(DRYRUN); \
./script/compress.sh $$?
@ -129,14 +131,14 @@ test-debug: .deps.ok .build_debug.ok
@bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
--unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST) --cpus=$(CPUS) \
--vppsrc=$(VPPSRC) --parallel=$(PARALLEL) --repeat=$(REPEAT) --debug_build=true \
--cpu0=$(CPU0); \
--cpu0=$(CPU0) --dryrun=$(DRYRUN); \
./script/compress.sh $$?
.PHONY: test-cov
test-cov: .deps.ok .build.cov.ok
@bash ./hs_test.sh --persist=$(PERSIST) --verbose=$(VERBOSE) \
--unconfigure=$(UNCONFIGURE) --debug=$(DEBUG) --test=$(TEST-HS) --cpus=$(CPUS) \
--vppsrc=$(VPPSRC) --cpu0=$(CPU0); \
--vppsrc=$(VPPSRC) --cpu0=$(CPU0) --dryrun=$(DRYRUN); \
./script/compress.sh $$?
.PHONY: test-leak

View File

@ -33,4 +33,8 @@ func TestHst(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "HST")
if *DryRun || *IsPersistent {
fmt.Println("\033[36m" + "Use 'make cleanup-hst' to remove IP files, " +
"namespaces and containers. \nPPID: " + ppid + "\033[0m")
}
}

View File

@ -5,12 +5,14 @@ source vars
args=
focused_test=0
persist_set=0
dryrun_set=0
unconfigure_set=0
debug_set=0
leak_check_set=0
debug_build=
ginkgo_args=
tc_names=()
dryrun=
for i in "$@"
do
@ -75,6 +77,13 @@ case "${i}" in
args="$args -cpu0"
fi
;;
--dryrun=*)
dryrun="${i#*=}"
if [ "$dryrun" = "true" ]; then
args="$args -dryrun"
dryrun_set=1
fi
;;
--leak_check=*)
leak_check="${i#*=}"
if [ "$leak_check" = "true" ]; then
@ -85,34 +94,39 @@ case "${i}" in
esac
done
if [ ${#tc_names[@]} -gt 1 ]
then
focused_test=0
fi
for name in "${tc_names[@]}"; do
ginkgo_args="$ginkgo_args --focus $name"
done
if [ $focused_test -eq 0 ] && [ $persist_set -eq 1 ]; then
echo "persist flag is not supported while running all tests!"
exit 1
if [ $focused_test -eq 0 ] && { [ $persist_set -eq 1 ] || [ $dryrun_set -eq 1 ]; }; then
echo -e "\e[1;31mpersist/dryrun flag is not supported while running all tests!\e[1;0m"
exit 2
fi
if [ $unconfigure_set -eq 1 ] && [ $focused_test -eq 0 ]; then
echo "a single test has to be specified when unconfigure is set"
exit 1
echo -e "\e[1;31ma single test has to be specified when unconfigure is set\e[1;0m"
exit 2
fi
if [ $persist_set -eq 1 ] && [ $unconfigure_set -eq 1 ]; then
echo "setting persist flag and unconfigure flag is not allowed"
exit 1
echo -e "\e[1;31msetting persist flag and unconfigure flag is not allowed\e[1;0m"
exit 2
fi
if [ $focused_test -eq 0 ] && [ $debug_set -eq 1 ]; then
echo "VPP debug flag is not supported while running all tests!"
exit 1
echo -e "\e[1;31mVPP debug flag is not supported while running all tests!\e[1;0m"
exit 2
fi
if [ $leak_check_set -eq 1 ]; then
if [ $focused_test -eq 0 ]; then
echo "a single test has to be specified when leak_check is set"
exit 1
echo -e "\e[1;31ma single test has to be specified when leak_check is set\e[1;0m"
exit 2
fi
ginkgo_args="--focus $tc_name"
sudo -E go run github.com/onsi/ginkgo/v2/ginkgo $ginkgo_args -- $args

View File

@ -314,8 +314,8 @@ func httpClientPostFile(s *NoTopoSuite, usePtr bool, fileSize int) {
serverAddress := s.HostAddr()
vpp := s.GetContainerByName("vpp").VppInstance
fileName := "/tmp/test_file.txt"
s.Log(vpp.Container.Exec("fallocate -l " + strconv.Itoa(fileSize) + " " + fileName))
s.Log(vpp.Container.Exec("ls -la " + fileName))
s.Log(vpp.Container.Exec(false, "fallocate -l "+strconv.Itoa(fileSize)+" "+fileName))
s.Log(vpp.Container.Exec(false, "ls -la "+fileName))
server := ghttp.NewUnstartedServer()
l, err := net.Listen("tcp", serverAddress+":80")
@ -615,7 +615,7 @@ func HttpStaticFileHandlerTestFunction(s *NoTopoSuite, max_age string) {
content2 := "<html><body><p>Page</p></body></html>"
vpp := s.GetContainerByName("vpp").VppInstance
vpp.Container.Exec("mkdir -p " + wwwRootPath)
vpp.Container.Exec(false, "mkdir -p "+wwwRootPath)
err := vpp.Container.CreateFile(wwwRootPath+"/index.html", content)
s.AssertNil(err, fmt.Sprint(err))
err = vpp.Container.CreateFile(wwwRootPath+"/page.html", content2)
@ -671,8 +671,8 @@ func HttpStaticFileHandlerTestFunction(s *NoTopoSuite, max_age string) {
func HttpStaticPathTraversalTest(s *NoTopoSuite) {
vpp := s.GetContainerByName("vpp").VppInstance
vpp.Container.Exec("mkdir -p " + wwwRootPath)
vpp.Container.Exec("mkdir -p " + "/tmp/secret_folder")
vpp.Container.Exec(false, "mkdir -p "+wwwRootPath)
vpp.Container.Exec(false, "mkdir -p "+"/tmp/secret_folder")
err := vpp.Container.CreateFile("/tmp/secret_folder/secret_file.txt", "secret")
s.AssertNil(err, fmt.Sprint(err))
serverAddress := s.VppAddr()
@ -693,7 +693,7 @@ func HttpStaticPathTraversalTest(s *NoTopoSuite) {
func HttpStaticMovedTest(s *NoTopoSuite) {
vpp := s.GetContainerByName("vpp").VppInstance
vpp.Container.Exec("mkdir -p " + wwwRootPath + "/tmp.aaa")
vpp.Container.Exec(false, "mkdir -p "+wwwRootPath+"/tmp.aaa")
err := vpp.Container.CreateFile(wwwRootPath+"/tmp.aaa/index.html", "<html><body><p>Hello</p></body></html>")
s.AssertNil(err, fmt.Sprint(err))
serverAddress := s.VppAddr()
@ -715,7 +715,7 @@ func HttpStaticMovedTest(s *NoTopoSuite) {
func HttpStaticNotFoundTest(s *NoTopoSuite) {
vpp := s.GetContainerByName("vpp").VppInstance
vpp.Container.Exec("mkdir -p " + wwwRootPath)
vpp.Container.Exec(false, "mkdir -p "+wwwRootPath)
serverAddress := s.VppAddr()
s.Log(vpp.Vppctl("http static server www-root " + wwwRootPath + " uri tcp://" + serverAddress + "/80 debug"))

View File

@ -328,6 +328,7 @@ func (c *Container) getVolumesAsSlice() []string {
}
core_pattern, err := sysctl.Read("kernel.core_pattern")
core_pattern = strings.ReplaceAll(core_pattern, "%", "%%")
if err == nil {
if len(core_pattern) > 0 && core_pattern[0] != '|' {
index := strings.LastIndex(core_pattern, "/")
@ -420,6 +421,19 @@ func (c *Container) CreateFile(destFileName string, content string) error {
return nil
}
func (c *Container) CreateFileInWorkDir(fileName string, contents string) error {
file, err := os.Create(c.GetHostWorkDir() + "/" + fileName)
if err != nil {
return err
}
defer file.Close()
_, err = file.Write([]byte(contents))
if err != nil {
return err
}
return nil
}
func (c *Container) GetFile(sourceFileName, targetFileName string) error {
cmd := exec.Command("docker", "cp", c.Name+":"+sourceFileName, targetFileName)
return cmd.Run()
@ -429,19 +443,29 @@ func (c *Container) GetFile(sourceFileName, targetFileName string) error {
* Executes in detached mode so that the started application can continue to run
* without blocking execution of test
*/
func (c *Container) ExecServer(command string, arguments ...any) {
func (c *Container) ExecServer(useEnvVars bool, command string, arguments ...any) {
var envVars string
serverCommand := fmt.Sprintf(command, arguments...)
containerExecCommand := "docker exec -d" + c.getEnvVarsAsCliOption() +
" " + c.Name + " " + serverCommand
if useEnvVars {
envVars = c.getEnvVarsAsCliOption()
} else {
envVars = ""
}
containerExecCommand := fmt.Sprintf("docker exec -d %s %s %s", envVars, c.Name, serverCommand)
GinkgoHelper()
c.Suite.Log(containerExecCommand)
c.Suite.AssertNil(exechelper.Run(containerExecCommand))
}
func (c *Container) Exec(command string, arguments ...any) string {
cliCommand := fmt.Sprintf(command, arguments...)
containerExecCommand := "docker exec" + c.getEnvVarsAsCliOption() +
" " + c.Name + " " + cliCommand
func (c *Container) Exec(useEnvVars bool, command string, arguments ...any) string {
var envVars string
serverCommand := fmt.Sprintf(command, arguments...)
if useEnvVars {
envVars = c.getEnvVarsAsCliOption()
} else {
envVars = ""
}
containerExecCommand := fmt.Sprintf("docker exec %s %s %s", envVars, c.Name, serverCommand)
GinkgoHelper()
c.Suite.Log(containerExecCommand)
byteOutput, err := exechelper.CombinedOutput(containerExecCommand)
@ -518,7 +542,7 @@ func (c *Container) stop() error {
return nil
}
func (c *Container) CreateConfig(targetConfigName string, templateName string, values any) {
func (c *Container) CreateConfigFromTemplate(targetConfigName string, templateName string, values any) {
template := template.Must(template.ParseFiles(templateName))
f, err := os.CreateTemp(logDir, "hst-config")
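
The new leading flag on Exec and ExecServer only decides whether the container's
env vars (VCL_CONFIG, LD_PRELOAD, ...) are appended to the docker exec command.
A minimal sketch; runVclEcho is a hypothetical helper, not part of this change:

    // Hypothetical helper illustrating the new Exec/ExecServer call shapes.
    func runVclEcho(server *Container, client *Container, proto string, addr string, port string) string {
        server.ExecServer(true, "vcl_test_server -p %s %s", proto, port) // detached, env vars forwarded
        client.Exec(false, "mkdir -p /tmp/results")                      // plain docker exec, no env vars
        return client.Exec(true, "vcl_test_client -p %s %s %s", proto, addr, port)
    }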

View File

@ -41,6 +41,7 @@ var IsDebugBuild = flag.Bool("debug_build", false, "some paths are different wit
var UseCpu0 = flag.Bool("cpu0", false, "use cpu0")
var IsLeakCheck = flag.Bool("leak_check", false, "run leak-check tests")
var ParallelTotal = flag.Lookup("ginkgo.parallel.total")
var DryRun = flag.Bool("dryrun", false, "set up containers but don't run tests")
var NumaAwareCpuAlloc bool
var SuiteTimeout time.Duration
@ -62,6 +63,18 @@ type HstSuite struct {
Docker *client.Client
}
type colors struct {
grn string
pur string
rst string
}
var Colors = colors{
grn: "\033[32m",
pur: "\033[35m",
rst: "\033[0m",
}
// used for colorful ReportEntry
type StringerStruct struct {
Label string
@ -104,8 +117,8 @@ func (s *HstSuite) newDockerClient() {
func (s *HstSuite) SetupSuite() {
s.CreateLogger()
s.Log("[* SUITE SETUP]")
s.newDockerClient()
s.Log("Suite Setup")
RegisterFailHandler(func(message string, callerSkip ...int) {
s.HstFail()
Fail(message, callerSkip...)
@ -139,13 +152,16 @@ func (s *HstSuite) AddCpuContext(cpuCtx *CpuContext) {
func (s *HstSuite) TearDownSuite() {
defer s.LogFile.Close()
defer s.Docker.Close()
s.Log("Suite Teardown")
if *IsPersistent || *DryRun {
return
}
s.Log("[* SUITE TEARDOWN]")
s.UnconfigureNetworkTopology()
}
func (s *HstSuite) TearDownTest() {
s.Log("Test Teardown")
if *IsPersistent {
s.Log("[* TEST TEARDOWN]")
if *IsPersistent || *DryRun {
return
}
coreDump := s.WaitForCoreDump()
@ -167,7 +183,7 @@ func (s *HstSuite) SkipIfUnconfiguring() {
}
func (s *HstSuite) SetupTest() {
s.Log("Test Setup")
s.Log("[* TEST SETUP]")
s.StartedContainers = s.StartedContainers[:0]
s.SkipIfUnconfiguring()
s.SetupContainers()
@ -306,13 +322,13 @@ func (s *HstSuite) CreateLogger() {
// Logs to files by default, logs to stdout when VERBOSE=true with GinkgoWriter
// to keep console tidy
func (s *HstSuite) Log(arg any) {
logs := strings.Split(fmt.Sprint(arg), "\n")
func (s *HstSuite) Log(log any, arg ...any) {
logs := strings.Split(fmt.Sprintf(fmt.Sprint(log), arg...), "\n")
for _, line := range logs {
s.Logger.Println(line)
}
if *IsVerbose {
GinkgoWriter.Println(arg)
GinkgoWriter.Println(fmt.Sprintf(fmt.Sprint(log), arg...))
}
}
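
Both Log call shapes used throughout this change work with the reworked
signature; inside a test body, roughly:

    s.Log(vpp.Vppctl("show version"))                          // single value, logged as before
    s.Log("%s* Started containers:%s", Colors.grn, Colors.rst) // fmt-style formatting
    // Even with no extra args the message passes through fmt.Sprintf, so a
    // literal '%' in a plain string would be parsed as a format verb.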
@ -477,6 +493,13 @@ func (s *HstSuite) LoadContainerTopology(topologyName string) {
}
s.Containers[newContainer.Name] = newContainer
}
if *DryRun {
s.Log(Colors.pur + "* Containers used by this suite (some might already be running):" + Colors.rst)
for name := range s.Containers {
s.Log("%sdocker start %s && docker exec -it %s bash%s", Colors.pur, name, name, Colors.rst)
}
}
}
func (s *HstSuite) LoadNetworkTopology(topologyName string) {
@ -564,14 +587,18 @@ func (s *HstSuite) ConfigureNetworkTopology(topologyName string) {
}
func (s *HstSuite) UnconfigureNetworkTopology() {
if *IsPersistent {
return
}
for _, nc := range s.NetConfigs {
nc.unconfigure()
}
}
func (s *HstSuite) LogStartedContainers() {
s.Log("%s* Started containers:%s", Colors.grn, Colors.rst)
for _, container := range s.StartedContainers {
s.Log(Colors.grn + container.Name + Colors.rst)
}
}
func (s *HstSuite) GetTestId() string {
testName := s.GetCurrentTestName()

View File

@ -42,6 +42,11 @@ func (s *CpuPinningSuite) SetupTest() {
container := s.GetContainerByName(SingleTopoContainerVpp)
vpp, err := container.newVppInstance(container.AllocatedCpus)
s.AssertNotNil(vpp, fmt.Sprint(err))
if *DryRun {
s.LogStartedContainers()
s.Skip("Dry run mode = true")
}
}
func (s *CpuPinningSuite) TearDownTest() {

View File

@ -10,6 +10,7 @@ import (
"reflect"
"runtime"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
)
@ -64,12 +65,8 @@ func (s *EnvoyProxySuite) SetupTest() {
vppContainer := s.GetContainerByName(VppContainerName)
vpp, err := vppContainer.newVppInstance(vppContainer.AllocatedCpus, sessionConfig)
s.AssertNotNil(vpp, fmt.Sprint(err))
s.AssertNil(vpp.Start())
clientInterface := s.GetInterfaceByName(ClientTapInterfaceName)
s.AssertNil(vpp.createTap(clientInterface, 1))
serverInterface := s.GetInterfaceByName(ServerTapInterfaceName)
s.AssertNil(vpp.createTap(serverInterface, 2))
vppContainer.Exec("chmod 777 -R %s", vppContainer.GetContainerWorkDir())
// nginx HTTP server
nginxContainer := s.GetTransientContainerByName(NginxServerContainerName)
@ -86,12 +83,11 @@ func (s *EnvoyProxySuite) SetupTest() {
Port: s.nginxPort,
Timeout: s.maxTimeout,
}
nginxContainer.CreateConfig(
nginxContainer.CreateConfigFromTemplate(
"/nginx.conf",
"./resources/nginx/nginx_server.conf",
nginxSettings,
)
s.AssertNil(nginxContainer.Start())
// Envoy
envoyContainer := s.GetContainerByName(EnvoyProxyContainerName)
@ -109,19 +105,35 @@ func (s *EnvoyProxySuite) SetupTest() {
ServerPort: s.nginxPort,
ProxyPort: s.proxyPort,
}
envoyContainer.CreateConfig(
envoyContainer.CreateConfigFromTemplate(
"/etc/envoy/envoy.yaml",
"resources/envoy/proxy.yaml",
envoySettings,
)
s.AssertNil(envoyContainer.Start())
s.AssertNil(vpp.Start())
// wait for VPP to start
time.Sleep(time.Second * 1)
s.AssertNil(vpp.createTap(clientInterface, 1))
s.AssertNil(vpp.createTap(serverInterface, 2))
vppContainer.Exec(false, "chmod 777 -R %s", vppContainer.GetContainerWorkDir())
// Add Ipv4 ARP entry for nginx HTTP server, otherwise first request fail (HTTP error 503)
arp := fmt.Sprintf("set ip neighbor %s %s %s",
serverInterface.Peer.Name(),
serverInterface.Ip4AddressString(),
serverInterface.HwAddress)
if *DryRun {
vpp.AppendToCliConfig(arp)
s.LogStartedContainers()
s.Log("%s* Proxy IP used in tests: %s:%d%s", Colors.pur, s.ProxyAddr(), s.ProxyPort(), Colors.rst)
s.Skip("Dry run mode = true")
}
vppContainer.VppInstance.Vppctl(arp)
s.AssertNil(nginxContainer.Start())
s.AssertNil(envoyContainer.Start())
}
func (s *EnvoyProxySuite) TearDownTest() {

View File

@ -60,24 +60,34 @@ func (s *LdpSuite) SetupTest() {
serverVpp, err := serverContainer.newVppInstance(serverContainer.AllocatedCpus, sessionConfig)
s.AssertNotNil(serverVpp, fmt.Sprint(err))
s.SetupServerVpp()
// ... For client
clientContainer := s.GetContainerByName("client-vpp")
clientVpp, err := clientContainer.newVppInstance(clientContainer.AllocatedCpus, sessionConfig)
s.AssertNotNil(clientVpp, fmt.Sprint(err))
s.setupClientVpp()
serverContainer.AddEnvVar("VCL_CONFIG", serverContainer.GetContainerWorkDir()+"/vcl_srv.conf")
clientContainer.AddEnvVar("VCL_CONFIG", clientContainer.GetContainerWorkDir()+"/vcl_cln.conf")
serverContainer.AddEnvVar("VCL_CONFIG", serverContainer.GetContainerWorkDir()+"/vcl.conf")
clientContainer.AddEnvVar("VCL_CONFIG", clientContainer.GetContainerWorkDir()+"/vcl.conf")
for _, container := range s.StartedContainers {
container.AddEnvVar("LD_PRELOAD", "/usr/lib/libvcl_ldpreload.so")
container.AddEnvVar("LDP_DEBUG", "0")
container.AddEnvVar("VCL_DEBUG", "0")
}
s.CreateVclConfig(serverContainer)
s.CreateVclConfig(clientContainer)
s.SetupServerVpp(serverContainer)
s.setupClientVpp(clientContainer)
if *DryRun {
s.LogStartedContainers()
s.Log("\n%s* LD_PRELOAD and VCL_CONFIG server/client paths:", Colors.grn)
s.Log("LD_PRELOAD=/usr/lib/libvcl_ldpreload.so")
s.Log("VCL_CONFIG=%s/vcl.conf", serverContainer.GetContainerWorkDir())
s.Log("VCL_CONFIG=%s/vcl.conf%s\n", clientContainer.GetContainerWorkDir(), Colors.rst)
s.Skip("Dry run mode = true")
}
}
func (s *LdpSuite) TearDownTest() {
@ -89,10 +99,25 @@ func (s *LdpSuite) TearDownTest() {
}
func (s *LdpSuite) SetupServerVpp() {
var srvVclConf Stanza
serverContainer := s.GetContainerByName("server-vpp")
serverVclFileName := serverContainer.GetHostWorkDir() + "/vcl_srv.conf"
func (s *LdpSuite) CreateVclConfig(container *Container) {
var vclConf Stanza
vclFileName := container.GetHostWorkDir() + "/vcl.conf"
appSocketApi := fmt.Sprintf("app-socket-api %s/var/run/app_ns_sockets/default",
container.GetContainerWorkDir())
err := vclConf.
NewStanza("vcl").
Append("rx-fifo-size 4000000").
Append("tx-fifo-size 4000000").
Append("app-scope-local").
Append("app-scope-global").
Append("use-mq-eventfd").
Append(appSocketApi).Close().
SaveToFile(vclFileName)
s.AssertNil(err, fmt.Sprint(err))
}
func (s *LdpSuite) SetupServerVpp(serverContainer *Container) {
serverVpp := serverContainer.VppInstance
s.AssertNil(serverVpp.Start())
@ -100,25 +125,9 @@ func (s *LdpSuite) SetupServerVpp() {
idx, err := serverVpp.createAfPacket(serverVeth)
s.AssertNil(err, fmt.Sprint(err))
s.AssertNotEqual(0, idx)
serverAppSocketApi := fmt.Sprintf("app-socket-api %s/var/run/app_ns_sockets/default",
serverContainer.GetContainerWorkDir())
err = srvVclConf.
NewStanza("vcl").
Append("rx-fifo-size 4000000").
Append("tx-fifo-size 4000000").
Append("app-scope-local").
Append("app-scope-global").
Append("use-mq-eventfd").
Append(serverAppSocketApi).Close().
SaveToFile(serverVclFileName)
s.AssertNil(err, fmt.Sprint(err))
}
func (s *LdpSuite) setupClientVpp() {
var clnVclConf Stanza
clientContainer := s.GetContainerByName("client-vpp")
clientVclFileName := clientContainer.GetHostWorkDir() + "/vcl_cln.conf"
func (s *LdpSuite) setupClientVpp(clientContainer *Container) {
clientVpp := clientContainer.VppInstance
s.AssertNil(clientVpp.Start())
@ -126,19 +135,6 @@ func (s *LdpSuite) setupClientVpp() {
idx, err := clientVpp.createAfPacket(clientVeth)
s.AssertNil(err, fmt.Sprint(err))
s.AssertNotEqual(0, idx)
clientAppSocketApi := fmt.Sprintf("app-socket-api %s/var/run/app_ns_sockets/default",
clientContainer.GetContainerWorkDir())
err = clnVclConf.
NewStanza("vcl").
Append("rx-fifo-size 4000000").
Append("tx-fifo-size 4000000").
Append("app-scope-local").
Append("app-scope-global").
Append("use-mq-eventfd").
Append(clientAppSocketApi).Close().
SaveToFile(clientVclFileName)
s.AssertNil(err, fmt.Sprint(err))
}
var _ = Describe("LdpSuite", Ordered, ContinueOnFailure, func() {

View File

@ -58,11 +58,8 @@ func (s *NginxProxySuite) SetupTest() {
vppContainer := s.GetContainerByName(VppContainerName)
vpp, err := vppContainer.newVppInstance(vppContainer.AllocatedCpus, sessionConfig)
s.AssertNotNil(vpp, fmt.Sprint(err))
s.AssertNil(vpp.Start())
clientInterface := s.GetInterfaceByName(MirroringClientInterfaceName)
s.AssertNil(vpp.createTap(clientInterface, 1))
serverInterface := s.GetInterfaceByName(MirroringServerInterfaceName)
s.AssertNil(vpp.createTap(serverInterface, 2))
// nginx proxy
nginxProxyContainer := s.GetContainerByName(NginxProxyContainerName)
@ -81,11 +78,23 @@ func (s *NginxProxySuite) SetupTest() {
Address: serverInterface.Ip4AddressString(),
Timeout: s.maxTimeout,
}
nginxServerContainer.CreateConfig(
nginxServerContainer.CreateConfigFromTemplate(
"/nginx.conf",
"./resources/nginx/nginx_server_mirroring.conf",
nginxSettings,
)
s.AssertNil(vpp.Start())
s.AssertNil(vpp.createTap(clientInterface, 1))
s.AssertNil(vpp.createTap(serverInterface, 2))
if *DryRun {
s.LogStartedContainers()
s.Log("%s* Proxy IP used in tests: %s:%d%s", Colors.pur, s.ProxyAddr(), s.ProxyPort(), Colors.rst)
s.Skip("Dry run mode = true")
}
s.AssertNil(nginxProxyContainer.Start())
s.AssertNil(nginxServerContainer.Start())
}
@ -119,7 +128,7 @@ func (s *NginxProxySuite) CreateNginxProxyConfig(container *Container, multiThre
Server: serverInterface.Ip4AddressString(),
Port: s.proxyPort,
}
container.CreateConfig(
container.CreateConfigFromTemplate(
"/nginx.conf",
"./resources/nginx/nginx_proxy_mirroring.conf",
values,

View File

@ -55,11 +55,15 @@ func (s *NoTopoSuite) SetupTest() {
container := s.GetContainerByName(SingleTopoContainerVpp)
vpp, _ := container.newVppInstance(container.AllocatedCpus, sessionConfig)
s.AssertNil(vpp.Start())
tapInterface := s.GetInterfaceByName(TapInterfaceName)
s.AssertNil(vpp.createTap(tapInterface), "failed to create tap interface")
if *DryRun {
s.LogStartedContainers()
s.Skip("Dry run mode = true")
}
}
func (s *NoTopoSuite) TearDownTest() {
@ -81,7 +85,7 @@ func (s *NoTopoSuite) CreateNginxConfig(container *Container, multiThreadWorkers
}{
Workers: workers,
}
container.CreateConfig(
container.CreateConfigFromTemplate(
"/nginx.conf",
"./resources/nginx/nginx.conf",
values,
@ -131,7 +135,7 @@ func (s *NoTopoSuite) CreateNginxHttp3Config(container *Container) {
}{
LogPrefix: container.Name,
}
container.CreateConfig(
container.CreateConfigFromTemplate(
"/nginx.conf",
"./resources/nginx/nginx_http3.conf",
nginxSettings,

View File

@ -60,15 +60,18 @@ func (s *VethsSuite) SetupTest() {
serverVpp, err := serverContainer.newVppInstance(serverContainer.AllocatedCpus, sessionConfig)
s.AssertNotNil(serverVpp, fmt.Sprint(err))
s.SetupServerVpp()
// ... For client
clientContainer := s.GetContainerByName("client-vpp")
clientVpp, err := clientContainer.newVppInstance(clientContainer.AllocatedCpus, sessionConfig)
s.AssertNotNil(clientVpp, fmt.Sprint(err))
s.SetupServerVpp()
s.setupClientVpp()
if *DryRun {
s.LogStartedContainers()
s.Skip("Dry run mode = true")
}
}
func (s *VethsSuite) SetupServerVpp() {

View File

@ -58,11 +58,9 @@ func (s *VppProxySuite) SetupTest() {
vppContainer := s.GetContainerByName(VppProxyContainerName)
vpp, err := vppContainer.newVppInstance(vppContainer.AllocatedCpus)
s.AssertNotNil(vpp, fmt.Sprint(err))
s.AssertNil(vpp.Start())
clientInterface := s.GetInterfaceByName(ClientTapInterfaceName)
s.AssertNil(vpp.createTap(clientInterface, 1))
serverInterface := s.GetInterfaceByName(ServerTapInterfaceName)
s.AssertNil(vpp.createTap(serverInterface, 2))
// nginx HTTP server
nginxContainer := s.GetTransientContainerByName(NginxServerContainerName)
@ -79,12 +77,21 @@ func (s *VppProxySuite) SetupTest() {
Port: s.nginxPort,
Timeout: s.maxTimeout,
}
nginxContainer.CreateConfig(
nginxContainer.CreateConfigFromTemplate(
"/nginx.conf",
"./resources/nginx/nginx_server.conf",
nginxSettings,
)
s.AssertNil(nginxContainer.Start())
s.AssertNil(vpp.Start())
s.AssertNil(vpp.createTap(clientInterface, 1))
s.AssertNil(vpp.createTap(serverInterface, 2))
if *DryRun {
s.LogStartedContainers()
s.Skip("Dry run mode = true")
}
}
func (s *VppProxySuite) TearDownTest() {

View File

@ -270,7 +270,7 @@ func (s *HstSuite) StartServerApp(c *Container, processName string, cmd string,
running chan error, done chan struct{}) {
s.Log("starting server")
c.ExecServer(cmd)
c.ExecServer(true, cmd)
cmd2 := exec.Command("docker", "exec", c.Name, "pidof", processName)
err := cmd2.Run()
if err != nil {

View File

@ -37,6 +37,7 @@ const vppConfigTemplate = `unix {
coredump-size unlimited
cli-listen %[1]s%[2]s
runtime-dir %[1]s/var/run
%[5]s
}
api-trace {
@ -122,18 +123,27 @@ func (vpp *VppInstance) getEtcDir() string {
return vpp.Container.GetContainerWorkDir() + "/etc/vpp"
}
func (vpp *VppInstance) Start() error {
maxReconnectAttempts := 3
// Replace default logger in govpp with our own
govppLogger := logrus.New()
govppLogger.SetOutput(io.MultiWriter(vpp.getSuite().Logger.Writer(), GinkgoWriter))
core.SetLogger(govppLogger)
// Create folders
containerWorkDir := vpp.Container.GetContainerWorkDir()
// Appends a string to '[host-work-dir]/cli-config.conf'.
// Creates the conf file if it doesn't exist. Used for dry-run mode.
func (vpp *VppInstance) AppendToCliConfig(vppCliConfig string) {
f, err := os.OpenFile(vpp.Container.GetHostWorkDir()+"/cli-config.conf", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
vpp.getSuite().AssertNil(err)
_, err = f.Write([]byte(vppCliConfig))
vpp.getSuite().AssertNil(err)
err = f.Close()
vpp.getSuite().AssertNil(err)
}
vpp.Container.Exec("mkdir --mode=0700 -p " + vpp.getRunDir())
vpp.Container.Exec("mkdir --mode=0700 -p " + vpp.getLogDir())
vpp.Container.Exec("mkdir --mode=0700 -p " + vpp.getEtcDir())
func (vpp *VppInstance) Start() error {
containerWorkDir := vpp.Container.GetContainerWorkDir()
var cliConfig string
if *DryRun {
cliConfig = fmt.Sprintf("exec %s/cli-config.conf", containerWorkDir)
}
vpp.Container.Exec(false, "mkdir --mode=0700 -p "+vpp.getRunDir())
vpp.Container.Exec(false, "mkdir --mode=0700 -p "+vpp.getLogDir())
vpp.Container.Exec(false, "mkdir --mode=0700 -p "+vpp.getEtcDir())
// Create startup.conf inside the container
configContent := fmt.Sprintf(
@ -142,6 +152,7 @@ func (vpp *VppInstance) Start() error {
defaultCliSocketFilePath,
defaultApiSocketFilePath,
defaultLogFilePath,
cliConfig,
)
configContent += vpp.generateVPPCpuConfig()
for _, c := range vpp.AdditionalConfig {
@ -154,7 +165,20 @@ func (vpp *VppInstance) Start() error {
cliContent := "#!/usr/bin/bash\nvppctl -s " + vpp.getRunDir() + "/cli.sock"
vppcliFileName := "/usr/bin/vppcli"
vpp.Container.CreateFile(vppcliFileName, cliContent)
vpp.Container.Exec("chmod 0755 " + vppcliFileName)
vpp.Container.Exec(false, "chmod 0755 "+vppcliFileName)
if *DryRun {
vpp.getSuite().Log("%s* Commands to start VPP and VPPCLI:", Colors.pur)
vpp.getSuite().Log("vpp -c %s/startup.conf", vpp.getEtcDir())
vpp.getSuite().Log("vppcli (= vppctl -s %s/cli.sock)%s\n", vpp.getRunDir(), Colors.rst)
return nil
}
maxReconnectAttempts := 3
// Replace default logger in govpp with our own
govppLogger := logrus.New()
govppLogger.SetOutput(io.MultiWriter(vpp.getSuite().Logger.Writer(), GinkgoWriter))
core.SetLogger(govppLogger)
vpp.getSuite().Log("starting vpp")
if *IsVppDebug {
@ -168,7 +192,7 @@ func (vpp *VppInstance) Start() error {
cont <- true
}()
vpp.Container.ExecServer("su -c \"vpp -c " + startupFileName + " &> /proc/1/fd/1\"")
vpp.Container.ExecServer(false, "su -c \"vpp -c "+startupFileName+" &> /proc/1/fd/1\"")
fmt.Println("run following command in different terminal:")
fmt.Println("docker exec -it " + vpp.Container.Name + " gdb -ex \"attach $(docker exec " + vpp.Container.Name + " pidof vpp)\"")
fmt.Println("Afterwards press CTRL+\\ to continue")
@ -176,7 +200,7 @@ func (vpp *VppInstance) Start() error {
fmt.Println("continuing...")
} else {
// Start VPP
vpp.Container.ExecServer("su -c \"vpp -c " + startupFileName + " &> /proc/1/fd/1\"")
vpp.Container.ExecServer(false, "su -c \"vpp -c "+startupFileName+" &> /proc/1/fd/1\"")
}
vpp.getSuite().Log("connecting to vpp")
@ -256,6 +280,23 @@ func (vpp *VppInstance) WaitForApp(appName string, timeout int) {
func (vpp *VppInstance) createAfPacket(
veth *NetInterface,
) (interface_types.InterfaceIndex, error) {
if *DryRun {
if ip4Address, err := veth.Ip4AddrAllocator.NewIp4InterfaceAddress(veth.Peer.NetworkNumber); err == nil {
veth.Ip4Address = ip4Address
} else {
return 0, err
}
vppCliConfig := fmt.Sprintf(
"create host-interface name %s\n"+
"set int state host-%s up\n"+
"set int ip addr host-%s %s\n",
veth.Name(),
veth.Name(),
veth.Name(), veth.Ip4Address)
vpp.AppendToCliConfig(vppCliConfig)
vpp.getSuite().Log("%s* Interface added:\n%s%s", Colors.grn, vppCliConfig, Colors.rst)
return 1, nil
}
createReq := &af_packet.AfPacketCreateV3{
Mode: 1,
UseRandomHwAddr: true,
@ -382,14 +423,28 @@ func (vpp *VppInstance) addAppNamespace(
return nil
}
func (vpp *VppInstance) createTap(
tap *NetInterface,
tapId ...uint32,
) error {
func (vpp *VppInstance) createTap(tap *NetInterface, tapId ...uint32) error {
var id uint32 = 1
if len(tapId) > 0 {
id = tapId[0]
}
if *DryRun {
vppCliConfig := fmt.Sprintf("create tap id %d host-if-name %s host-ip4-addr %s\n"+
"set int ip addr tap%d %s\n"+
"set int state tap%d up\n",
id,
tap.name,
tap.Ip4Address,
id,
tap.Peer.Ip4Address,
id,
)
vpp.AppendToCliConfig(vppCliConfig)
vpp.getSuite().Log("%s* Interface added:\n%s%s", Colors.grn, vppCliConfig, Colors.rst)
return nil
}
createTapReq := &tapv2.TapCreateV3{
ID: id,
HostIfNameSet: true,
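
To tie the dry-run pieces together: createAfPacket and createTap skip the
binary API in dry-run mode and only queue CLI text, AppendToCliConfig writes
that text to cli-config.conf in the work dir, and Start() points the unix
stanza at that file, so the interface config is applied once VPP is launched
by hand. An illustrative sketch (placeholder names and addresses):

    // cli-config.conf ends up with lines such as:
    //   create tap id 1 host-if-name <host-if> host-ip4-addr <host-ip>
    //   set int ip addr tap1 <peer-ip>
    //   set int state tap1 up
    // and startup.conf's unix stanza gains:
    //   exec <container-work-dir>/cli-config.conf
    vpp.AppendToCliConfig("set int state tap1 up\n")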

View File

@ -29,7 +29,7 @@ func testVppEcho(s *VethsSuite, proto string) {
" use-app-socket-api" +
" uri " + uri
s.Log(serverCommand)
echoSrvContainer.ExecServer(serverCommand)
echoSrvContainer.ExecServer(true, serverCommand)
echoClnContainer := s.GetContainerByName("client-app")
@ -37,6 +37,6 @@ func testVppEcho(s *VethsSuite, proto string) {
" socket-name " + echoClnContainer.GetContainerWorkDir() + "/var/run/app_ns_sockets/default" +
" use-app-socket-api uri " + uri
s.Log(clientCommand)
o := echoClnContainer.Exec(clientCommand)
o := echoClnContainer.Exec(true, clientCommand)
s.Log(o)
}

View File

@ -1,5 +1,10 @@
#!/usr/bin/env bash
if [ $1 == 2 ]
then
exit 1
fi
# if failed-summary.log is not empty, exit status = 1
if [ -s "${HS_ROOT}/summary/failed-summary.log" ]
then

View File

@ -51,7 +51,7 @@ func testXEchoVclClient(s *VethsSuite, proto string) {
testClientCommand := "vcl_test_client -N 100 -p " + proto + " " + serverVeth.Ip4AddressString() + " " + port
s.Log(testClientCommand)
echoClnContainer.AddEnvVar("VCL_CONFIG", "/vcl.conf")
o := echoClnContainer.Exec(testClientCommand)
o := echoClnContainer.Exec(true, testClientCommand)
s.Log(o)
s.AssertContains(o, "CLIENT RESULTS")
}
@ -72,7 +72,7 @@ func testXEchoVclServer(s *VethsSuite, proto string) {
srvAppCont.CreateFile("/vcl.conf", getVclConfig(srvVppCont))
srvAppCont.AddEnvVar("VCL_CONFIG", "/vcl.conf")
vclSrvCmd := fmt.Sprintf("vcl_test_server -p %s %s", proto, port)
srvAppCont.ExecServer(vclSrvCmd)
srvAppCont.ExecServer(true, vclSrvCmd)
serverVeth := s.GetInterfaceByName(ServerInterfaceName)
serverVethAddress := serverVeth.Ip4AddressString()
@ -90,7 +90,7 @@ func testVclEcho(s *VethsSuite, proto string) {
srvAppCont.CreateFile("/vcl.conf", getVclConfig(srvVppCont))
srvAppCont.AddEnvVar("VCL_CONFIG", "/vcl.conf")
srvAppCont.ExecServer("vcl_test_server -p " + proto + " " + port)
srvAppCont.ExecServer(true, "vcl_test_server -p "+proto+" "+port)
serverVeth := s.GetInterfaceByName(ServerInterfaceName)
serverVethAddress := serverVeth.Ip4AddressString()
@ -100,7 +100,7 @@ func testVclEcho(s *VethsSuite, proto string) {
testClientCommand := "vcl_test_client -p " + proto + " " + serverVethAddress + " " + port
echoClnContainer.AddEnvVar("VCL_CONFIG", "/vcl.conf")
o := echoClnContainer.Exec(testClientCommand)
o := echoClnContainer.Exec(true, testClientCommand)
s.Log(o)
}
@ -128,7 +128,7 @@ func testRetryAttach(s *VethsSuite, proto string) {
echoSrvContainer.CreateFile("/vcl.conf", getVclConfig(echoSrvContainer))
echoSrvContainer.AddEnvVar("VCL_CONFIG", "/vcl.conf")
echoSrvContainer.ExecServer("vcl_test_server -p " + proto + " 12346")
echoSrvContainer.ExecServer(true, "vcl_test_server -p "+proto+" 12346")
s.Log("This whole test case can take around 3 minutes to run. Please be patient.")
s.Log("... Running first echo client test, before disconnect.")
@ -141,14 +141,14 @@ func testRetryAttach(s *VethsSuite, proto string) {
testClientCommand := "vcl_test_client -U -p " + proto + " " + serverVethAddress + " 12346"
echoClnContainer.AddEnvVar("VCL_CONFIG", "/vcl.conf")
o := echoClnContainer.Exec(testClientCommand)
o := echoClnContainer.Exec(true, testClientCommand)
s.Log(o)
s.Log("... First test ended. Stopping VPP server now.")
// Stop server-vpp-instance, start it again and then run vcl-test-client once more
srvVppContainer.VppInstance.Disconnect()
stopVppCommand := "/bin/bash -c 'ps -C vpp_main -o pid= | xargs kill -9'"
srvVppContainer.Exec(stopVppCommand)
srvVppContainer.Exec(false, stopVppCommand)
s.SetupServerVpp()
@ -156,7 +156,7 @@ func testRetryAttach(s *VethsSuite, proto string) {
time.Sleep(30 * time.Second) // Wait a moment for the re-attachment to happen
s.Log("... Running second echo client test, after disconnect and re-attachment.")
o = echoClnContainer.Exec(testClientCommand)
o = echoClnContainer.Exec(true, testClientCommand)
s.Log(o)
s.Log("Done.")
}