2019-10-31 13:31:07 -05:00
|
|
|
#!/usr/bin/env python3
|
2016-10-03 19:44:57 +02:00
|
|
|
|
2017-02-14 02:55:31 +01:00
|
|
|
from __future__ import print_function
|
2019-12-01 22:24:28 -05:00
|
|
|
import logging
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
import sys
|
2017-02-14 02:55:31 +01:00
|
|
|
import os
|
|
|
|
import select
|
2019-06-20 12:24:12 -04:00
|
|
|
import signal
|
2020-12-05 22:39:14 +00:00
|
|
|
import subprocess
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
import unittest
|
2021-05-31 16:08:53 +02:00
|
|
|
import re
|
2016-10-28 13:20:27 +02:00
|
|
|
import time
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
import faulthandler
|
|
|
|
import random
|
|
|
|
import copy
|
2018-11-29 09:37:08 +01:00
|
|
|
import platform
|
2021-05-31 16:08:53 +02:00
|
|
|
import shutil
|
2016-12-08 10:16:41 +01:00
|
|
|
from collections import deque
|
2018-11-25 11:05:13 -08:00
|
|
|
from threading import Thread, Event
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
from inspect import getdoc, isclass
|
2018-11-25 11:05:13 -08:00
|
|
|
from traceback import format_exception
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
from logging import FileHandler, DEBUG, Formatter
|
2021-01-14 10:19:08 +00:00
|
|
|
from enum import Enum
|
2021-04-08 19:37:41 +02:00
|
|
|
from abc import ABC, abstractmethod
|
2021-09-22 11:24:06 +01:00
|
|
|
from struct import pack, unpack
|
2019-03-10 10:04:23 -07:00
|
|
|
|
|
|
|
import scapy.compat
|
2022-01-10 21:57:27 +00:00
|
|
|
from scapy.packet import Raw, Packet
|
2021-05-31 16:08:53 +02:00
|
|
|
from config import config, available_cpus, num_cpus, max_vpp_cpus
|
2019-06-20 12:24:12 -04:00
|
|
|
import hook as hookmodule
|
2018-12-17 21:43:43 -08:00
|
|
|
from vpp_pg_interface import VppPGInterface
|
2018-12-21 16:04:22 +01:00
|
|
|
from vpp_sub_interface import VppSubInterface
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
from vpp_lo_interface import VppLoInterface
|
2019-03-15 02:16:20 -07:00
|
|
|
from vpp_bvi_interface import VppBviInterface
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
from vpp_papi_provider import VppPapiProvider
|
2021-06-03 14:43:21 +00:00
|
|
|
from vpp_papi import VppEnum
|
2019-12-02 21:42:28 -05:00
|
|
|
import vpp_papi
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
from vpp_papi.vpp_stats import VPPStats
|
2021-03-03 10:40:05 +01:00
|
|
|
from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
|
2022-04-26 19:02:15 +02:00
|
|
|
from log import (
|
|
|
|
RED,
|
|
|
|
GREEN,
|
|
|
|
YELLOW,
|
|
|
|
double_line_delim,
|
|
|
|
single_line_delim,
|
|
|
|
get_logger,
|
|
|
|
colorize,
|
|
|
|
)
|
2018-11-25 11:05:13 -08:00
|
|
|
from vpp_object import VppObjectRegistry
|
Revert "VPP-1508 python3 tests: raw_input"
This reverts commit 72f0004ac5f6e201cbe042593d76df6f2491d743.
Reason for revert: Traceback (most recent call last):
File "/home/ksekera/vpp/test/test_ipsec_ah.py", line 36, in
setUpClass
super(TemplateIpsecAh, cls).setUpClass()
File "/home/ksekera/vpp/test/template_ipsec.py", line 105, in
setUpClass
super(TemplateIpsec, cls).setUpClass()
File "/home/ksekera/vpp/test/framework.py", line 459, in setUpClass
cls.quit()
File "/home/ksekera/vpp/test/framework.py", line 475, in quit
six.input("When done debugging, press ENTER to kill the "
AttributeError: 'module' object has no attribute 'input'
Change-Id: Idf0bbfea231730b37bae5dcb4557a0f82ab1b810
Signed-off-by: Ole Troan <ot@cisco.com>
2018-11-26 10:27:50 +00:00
|
|
|
from util import ppp, is_core_present
|
|
|
|
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
|
|
|
|
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
|
|
|
|
from scapy.layers.inet6 import ICMPv6EchoReply
|
2022-08-30 13:59:03 -07:00
|
|
|
from vpp_running import use_running
|
2023-03-29 16:04:58 +02:00
|
|
|
from test_result_code import TestResultCode
|
2018-12-09 15:37:04 -08:00
|
|
|
|
2021-04-08 19:37:41 +02:00
|
|
|
|
2019-12-01 22:24:28 -05:00
|
|
|
# Module-level logger for messages emitted by the framework itself.
logger = logging.getLogger(__name__)

# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger("VppTestCase")
null_logger.addHandler(logging.NullHandler())

if config.debug_framework:
    # Optional framework self-debugging facilities; only imported when the
    # debug-framework config knob is set.
    import debug_internal
|
|
|
|
|
2016-10-11 11:47:09 +02:00
|
|
|
"""
|
|
|
|
Test framework module.
|
|
|
|
|
|
|
|
The module provides a set of tools for constructing and running tests and
|
|
|
|
representing the results.
|
|
|
|
"""
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
|
2019-06-20 12:24:12 -04:00
|
|
|
class VppDiedError(Exception):
    """Raised to report that the VPP subprocess terminated unexpectedly."""

    # Reverse lookup table: numeric signal value -> canonical "SIG..." name.
    # The SIG_* pseudo-constants (SIG_DFL, SIG_IGN, ...) are excluded.
    signals_by_value = {
        value: name
        for name, value in signal.__dict__.items()
        if name.startswith("SIG") and not name.startswith("SIG_")
    }

    def __init__(self, rv=None, testcase=None, method_name=None):
        """Build the error message from the available context.

        :param rv: return code of the dead process (negative = killed by signal)
        :param testcase: name of the testcase that was running, if known
        :param method_name: name of the test method that was running, if known
        """
        self.rv = rv
        self.testcase = testcase
        self.method_name = method_name
        try:
            # a negative return code means "killed by signal -rv"
            self.signal_name = VppDiedError.signals_by_value[-rv]
        except (KeyError, TypeError):
            # rv was None (TypeError) or not a known signal value (KeyError)
            self.signal_name = None

        in_msg = (
            ""
            if testcase is None and method_name is None
            else " while running %s.%s" % (testcase, method_name)
        )

        if self.rv:
            sig_part = " [%s]" % (
                self.signal_name if self.signal_name is not None else ""
            )
            msg = "VPP subprocess died unexpectedly%s with return code: %d%s." % (
                in_msg,
                self.rv,
                sig_part,
            )
        else:
            msg = "VPP subprocess died unexpectedly%s." % in_msg

        super().__init__(msg)
|
|
|
|
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
class _PacketInfo(object):
|
2016-10-11 11:47:09 +02:00
|
|
|
"""Private class to create packet info object.
|
|
|
|
|
|
|
|
Help process information about the next packet.
|
|
|
|
Set variables to default values.
|
|
|
|
"""
|
2022-04-26 19:02:15 +02:00
|
|
|
|
2016-11-11 11:38:55 +01:00
|
|
|
#: Store the index of the packet.
|
2016-10-03 19:44:57 +02:00
|
|
|
index = -1
|
2016-11-11 11:38:55 +01:00
|
|
|
#: Store the index of the source packet generator interface of the packet.
|
2016-10-03 19:44:57 +02:00
|
|
|
src = -1
|
2016-11-11 11:38:55 +01:00
|
|
|
#: Store the index of the destination packet generator interface
|
|
|
|
#: of the packet.
|
2016-10-03 19:44:57 +02:00
|
|
|
dst = -1
|
2017-03-02 15:22:47 +01:00
|
|
|
#: Store expected ip version
|
|
|
|
ip = -1
|
|
|
|
#: Store expected upper protocol
|
|
|
|
proto = -1
|
2016-11-11 11:38:55 +01:00
|
|
|
#: Store the copy of the former packet.
|
2016-10-03 19:44:57 +02:00
|
|
|
data = None
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2016-12-07 15:09:13 +01:00
|
|
|
def __eq__(self, other):
|
|
|
|
index = self.index == other.index
|
|
|
|
src = self.src == other.src
|
|
|
|
dst = self.dst == other.dst
|
|
|
|
data = self.data == other.data
|
|
|
|
return index and src and dst and data
|
|
|
|
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2017-02-14 02:55:31 +01:00
|
|
|
def pump_output(testclass):
    """pump output from vpp stdout/stderr to proper queues

    Runs in a dedicated thread: blocks in select() on the vpp subprocess's
    stdout/stderr plus a wakeup pipe, and appends complete lines to
    testclass.vpp_stdout_deque / vpp_stderr_deque until the stop flag is set.
    """
    if not hasattr(testclass, "vpp"):
        # no vpp subprocess handle - nothing to pump
        return
    # carry-over buffers for a trailing partial line between reads
    stdout_fragment = ""
    stderr_fragment = ""
    while not testclass.pump_thread_stop_flag.is_set():
        # the wakeup pipe lets the owner interrupt the blocking select()
        readable = select.select(
            [
                testclass.vpp.stdout.fileno(),
                testclass.vpp.stderr.fileno(),
                testclass.pump_thread_wakeup_pipe[0],
            ],
            [],
            [],
        )[0]
        if testclass.vpp.stdout.fileno() in readable:
            read = os.read(testclass.vpp.stdout.fileno(), 102400)
            if len(read) > 0:
                # keepends=True so complete lines can be told apart from a
                # trailing partial line
                split = read.decode("ascii", errors="backslashreplace").splitlines(True)
                if len(stdout_fragment) > 0:
                    # prepend the partial line left over from the last read
                    split[0] = "%s%s" % (stdout_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                    limit = None
                else:
                    # last element is an incomplete line - hold it back
                    limit = -1
                    stdout_fragment = split[-1]
                testclass.vpp_stdout_deque.extend(split[:limit])
                if not config.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.info("VPP STDOUT: %s" % line.rstrip("\n"))
        if testclass.vpp.stderr.fileno() in readable:
            read = os.read(testclass.vpp.stderr.fileno(), 102400)
            if len(read) > 0:
                split = read.decode("ascii", errors="backslashreplace").splitlines(True)
                if len(stderr_fragment) > 0:
                    split[0] = "%s%s" % (stderr_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                    limit = None
                else:
                    limit = -1
                    stderr_fragment = split[-1]

                testclass.vpp_stderr_deque.extend(split[:limit])
                if not config.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.error("VPP STDERR: %s" % line.rstrip("\n"))
        # ignoring the dummy pipe here intentionally - the
        # flag will take care of properly terminating the loop
|
2016-10-28 13:20:27 +02:00
|
|
|
|
|
|
|
|
2018-12-06 07:46:13 -08:00
|
|
|
def _is_platform_aarch64():
|
2022-04-26 19:02:15 +02:00
|
|
|
return platform.machine() == "aarch64"
|
2018-11-29 09:37:08 +01:00
|
|
|
|
2019-05-16 14:34:55 +02:00
|
|
|
|
2018-12-06 07:46:13 -08:00
|
|
|
is_platform_aarch64 = _is_platform_aarch64()
|
|
|
|
|
2018-11-29 09:37:08 +01:00
|
|
|
|
2022-09-18 22:28:44 -04:00
|
|
|
def _is_distro_ubuntu2204():
|
|
|
|
with open("/etc/os-release") as f:
|
|
|
|
for line in f.readlines():
|
|
|
|
if "jammy" in line:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
|
|
is_distro_ubuntu2204 = _is_distro_ubuntu2204()
|
|
|
|
|
|
|
|
|
2022-09-20 21:52:18 -04:00
|
|
|
def _is_distro_debian11():
|
|
|
|
with open("/etc/os-release") as f:
|
|
|
|
for line in f.readlines():
|
|
|
|
if "bullseye" in line:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
|
|
is_distro_debian11 = _is_distro_debian11()
|
|
|
|
|
|
|
|
|
2017-08-08 04:33:53 +02:00
|
|
|
class KeepAliveReporter(object):
    """
    Singleton object which reports test start to parent process

    Implemented as a Borg: every instance shares one state dict.
    """

    _shared_state = {}

    def __init__(self):
        # Borg pattern - all instances alias the same __dict__, and _pipe is
        # (re)set so the property is always readable, even before setup.
        self.__dict__ = self._shared_state
        self._pipe = None

    @property
    def pipe(self):
        """The keep-alive pipe endpoint, or None when not running forked."""
        return self._pipe

    @pipe.setter
    def pipe(self, value):
        if self._pipe is not None:
            raise Exception("Internal error - pipe should only be set once.")
        self._pipe = value

    def send_keep_alive(self, test, desc=None):
        """
        Write current test tmpdir & desc to keep-alive pipe to signal liveness
        """
        if not hasattr(test, "vpp") or self.pipe is None:
            # if not running forked..
            return

        # classes report "desc (module.Class)", instances report their id
        desc = (
            "%s (%s)" % (desc, unittest.util.strclass(test))
            if isclass(test)
            else test.id()
        )

        self.pipe.send((desc, config.vpp, test.tempdir, test.vpp.pid))
|
2017-08-08 04:33:53 +02:00
|
|
|
|
|
|
|
|
2021-01-14 10:19:08 +00:00
|
|
|
class TestCaseTag(Enum):
    """Behavioral tags attachable to test case classes via the
    tag_* decorators defined below."""

    # marks the suites that must run at the end
    # using only a single test runner
    RUN_SOLO = 1
    # marks the suites broken on VPP multi-worker
    FIXME_VPP_WORKERS = 2
    # marks the suites broken when ASan is enabled
    FIXME_ASAN = 3
    # marks suites broken on Ubuntu-22.04
    FIXME_UBUNTU2204 = 4
    # marks suites broken on Debian-11
    FIXME_DEBIAN11 = 5
    # marks suites broken on debug vpp image
    FIXME_VPP_DEBUG = 6
|
2021-01-14 10:19:08 +00:00
|
|
|
|
|
|
|
|
|
|
|
def create_tag_decorator(e):
    """Return a class decorator that appends tag *e* to cls.test_tags.

    Creates the test_tags list on first use; later decorators extend it.
    """

    def decorator(cls):
        existing = getattr(cls, "test_tags", None)
        if existing is None:
            cls.test_tags = [e]
        else:
            existing.append(e)
        return cls

    return decorator
|
|
|
|
|
2021-01-20 20:30:36 +00:00
|
|
|
|
2021-01-14 10:19:08 +00:00
|
|
|
# Ready-made class decorators, one per TestCaseTag; apply them to a test
# case class to attach the corresponding tag.
tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
tag_fixme_asan = create_tag_decorator(TestCaseTag.FIXME_ASAN)
tag_fixme_ubuntu2204 = create_tag_decorator(TestCaseTag.FIXME_UBUNTU2204)
tag_fixme_debian11 = create_tag_decorator(TestCaseTag.FIXME_DEBIAN11)
tag_fixme_vpp_debug = create_tag_decorator(TestCaseTag.FIXME_VPP_DEBUG)
|
2021-01-14 10:19:08 +00:00
|
|
|
|
|
|
|
|
2021-03-16 12:52:12 +01:00
|
|
|
class DummyVpp:
    """Stand-in for the vpp subprocess handle, used when attaching to an
    already-running VPP instead of spawning one (debug_attach mode).

    Mimics the subset of the Popen interface the framework touches.
    """

    # a real Popen has returncode None while the process is alive
    returncode = None
    # sentinel pid, never a real process id
    pid = 0xCAFEBAFE

    def poll(self):
        """No-op; a real Popen.poll() would return the exit code."""
        return None

    def terminate(self):
        """No-op; there is no child process to terminate."""
        return None
|
|
|
|
|
|
|
|
|
2021-04-08 19:37:41 +02:00
|
|
|
class CPUInterface(ABC):
    """Abstract interface for test classes that declare CPU requirements
    and receive CPU assignments from the framework."""

    # cpu ids allotted to this test class (set via assign_cpus)
    cpus = []
    # set to True when the test is skipped for lack of free cpus
    skipped_due_to_cpu_lack = False

    @classmethod
    @abstractmethod
    def get_cpus_required(cls):
        """Return the number of cpus this test class needs."""

    @classmethod
    def assign_cpus(cls, cpus):
        """Record the list of cpu ids allotted to this test class."""
        cls.cpus = cpus
|
|
|
|
|
|
|
|
|
2022-08-30 13:59:03 -07:00
|
|
|
@use_running
|
2021-04-08 19:37:41 +02:00
|
|
|
class VppTestCase(CPUInterface, unittest.TestCase):
|
2016-11-11 11:38:55 +01:00
|
|
|
"""This subclass is a base class for VPP test cases that are implemented as
|
|
|
|
classes. It provides methods to create and run test case.
|
2016-10-11 11:47:09 +02:00
|
|
|
"""
|
|
|
|
|
2021-03-11 10:26:54 -08:00
|
|
|
    # extra content injected into the statseg { } section of the vpp config
    extra_vpp_statseg_config = ""
    # extra top-level stanzas appended to the vpp command line
    extra_vpp_config = []
    # extra plugin { } stanzas for the plugins section of the vpp config
    extra_vpp_plugin_config = []
    # default logger; the framework replaces this with a per-test logger
    logger = null_logger
    # seconds to wait for a VPP API response
    vapi_response_timeout = 5
    # NOTE(review): presumably consulted during tearDown to decide whether
    # registered VPP objects are removed -- confirm against tearDown code.
    remove_configured_vpp_objects_on_tear_down = True
|
2018-11-28 07:42:11 +01:00
|
|
|
|
2016-10-11 11:47:09 +02:00
|
|
|
    @property
    def packet_infos(self):
        """List of packet infos"""
        # simple read-only view over the private _packet_infos attribute
        return self._packet_infos
|
|
|
|
|
2016-12-21 08:50:14 +01:00
|
|
|
@classmethod
|
|
|
|
def get_packet_count_for_if_idx(cls, dst_if_index):
|
|
|
|
"""Get the number of packet info for specified destination if index"""
|
|
|
|
if dst_if_index in cls._packet_count_for_dst_if_idx:
|
|
|
|
return cls._packet_count_for_dst_if_idx[dst_if_index]
|
|
|
|
else:
|
|
|
|
return 0
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2020-08-26 14:33:54 +00:00
|
|
|
@classmethod
|
2021-01-14 10:19:08 +00:00
|
|
|
def has_tag(cls, tag):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""if the test case has a given tag - return true"""
|
2021-01-14 10:19:08 +00:00
|
|
|
try:
|
|
|
|
return tag in cls.test_tags
|
|
|
|
except AttributeError:
|
|
|
|
pass
|
2020-08-26 14:33:54 +00:00
|
|
|
return False
|
|
|
|
|
2021-01-14 10:19:08 +00:00
|
|
|
    @classmethod
    def is_tagged_run_solo(cls):
        """if the test case class is timing-sensitive - return true

        Such suites are run at the end on a single test runner.
        """
        return cls.has_tag(TestCaseTag.RUN_SOLO)
|
|
|
|
|
2021-09-09 17:57:02 -07:00
|
|
|
    @classmethod
    def skip_fixme_asan(cls):
        """if @tag_fixme_asan & ASan is enabled - mark for skip"""
        if cls.has_tag(TestCaseTag.FIXME_ASAN):
            # an ASan build is detected via the cmake args used to build vpp
            vpp_extra_cmake_args = os.environ.get("VPP_EXTRA_CMAKE_ARGS", "")
            if "DVPP_ENABLE_SANITIZE_ADDR=ON" in vpp_extra_cmake_args:
                # unittest.skip mutates the class in place (and returns it),
                # so the local rebinding of cls is sufficient
                cls = unittest.skip("Skipping @tag_fixme_asan tests")(cls)
|
|
|
|
|
2022-09-18 22:28:44 -04:00
|
|
|
    @classmethod
    def skip_fixme_ubuntu2204(cls):
        """if distro is ubuntu 22.04 and @tag_fixme_ubuntu2204 mark for skip"""
        # NOTE(review): only the tag is checked here; the actual distro check
        # is presumably done by the caller (cf. run_vpp) -- confirm
        if cls.has_tag(TestCaseTag.FIXME_UBUNTU2204):
            cls = unittest.skip("Skipping @tag_fixme_ubuntu2204 tests")(cls)
|
|
|
|
|
2022-09-20 21:52:18 -04:00
|
|
|
    @classmethod
    def skip_fixme_debian11(cls):
        """if distro is Debian-11 and @tag_fixme_debian11 mark for skip"""
        # NOTE(review): only the tag is checked here; the actual distro check
        # is presumably done by the caller (cf. run_vpp) -- confirm
        if cls.has_tag(TestCaseTag.FIXME_DEBIAN11):
            cls = unittest.skip("Skipping @tag_fixme_debian11 tests")(cls)
|
|
|
|
|
2022-10-04 22:02:49 -04:00
|
|
|
    @classmethod
    def skip_fixme_vpp_debug(cls):
        """Mark the test class skipped on debug vpp images.

        NOTE(review): unlike the sibling skip_fixme_* helpers, this one does
        NOT guard on has_tag(TestCaseTag.FIXME_VPP_DEBUG) -- the caller is
        presumably expected to check the tag before calling; confirm.
        """
        cls = unittest.skip("Skipping @tag_fixme_vpp_debug tests")(cls)
|
|
|
|
|
2016-10-11 11:47:09 +02:00
|
|
|
    @classmethod
    def instance(cls):
        """Return the instance of this testcase"""
        # NOTE(review): assumes cls.test_instance is assigned by framework
        # code not visible in this chunk -- confirm
        return cls.test_instance
|
2016-10-03 19:44:57 +02:00
|
|
|
|
2016-10-28 13:20:27 +02:00
|
|
|
@classmethod
|
|
|
|
def set_debug_flags(cls, d):
|
2019-10-21 02:53:14 +00:00
|
|
|
cls.gdbserver_port = 7777
|
2016-10-28 13:20:27 +02:00
|
|
|
cls.debug_core = False
|
|
|
|
cls.debug_gdb = False
|
|
|
|
cls.debug_gdbserver = False
|
2019-10-21 02:53:14 +00:00
|
|
|
cls.debug_all = False
|
2021-03-16 12:52:12 +01:00
|
|
|
cls.debug_attach = False
|
2016-10-28 13:20:27 +02:00
|
|
|
if d is None:
|
|
|
|
return
|
|
|
|
dl = d.lower()
|
|
|
|
if dl == "core":
|
|
|
|
cls.debug_core = True
|
2019-10-21 02:53:14 +00:00
|
|
|
elif dl == "gdb" or dl == "gdb-all":
|
2016-10-28 13:20:27 +02:00
|
|
|
cls.debug_gdb = True
|
2019-10-21 02:53:14 +00:00
|
|
|
elif dl == "gdbserver" or dl == "gdbserver-all":
|
2016-10-28 13:20:27 +02:00
|
|
|
cls.debug_gdbserver = True
|
2021-03-16 12:52:12 +01:00
|
|
|
elif dl == "attach":
|
|
|
|
cls.debug_attach = True
|
2016-10-28 13:20:27 +02:00
|
|
|
else:
|
|
|
|
raise Exception("Unrecognized DEBUG option: '%s'" % d)
|
2019-10-21 02:53:14 +00:00
|
|
|
if dl == "gdb-all" or dl == "gdbserver-all":
|
|
|
|
cls.debug_all = True
|
2016-10-28 13:20:27 +02:00
|
|
|
|
2021-04-08 19:37:41 +02:00
|
|
|
@classmethod
|
|
|
|
def get_vpp_worker_count(cls):
|
|
|
|
if not hasattr(cls, "vpp_worker_count"):
|
|
|
|
if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
|
|
|
|
cls.vpp_worker_count = 0
|
|
|
|
else:
|
2021-05-31 16:08:53 +02:00
|
|
|
cls.vpp_worker_count = config.vpp_worker_count
|
2021-04-08 19:37:41 +02:00
|
|
|
return cls.vpp_worker_count
|
2018-07-16 14:22:01 +02:00
|
|
|
|
2021-04-08 19:37:41 +02:00
|
|
|
    @classmethod
    def get_cpus_required(cls):
        """Return cpus needed: one main core plus one per vpp worker."""
        return 1 + cls.get_vpp_worker_count()
|
2018-07-16 14:22:01 +02:00
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
    @classmethod
    def setUpConstants(cls):
        """Set-up the test case class based on environment variables

        Builds cls.vpp_cmdline - the full vpp command line, including unix,
        api, cpu, statseg, socksvr and plugin sections - from the global
        config plus the per-class extra_vpp_* hooks.
        """
        cls.step = config.step
        cls.plugin_path = ":".join(config.vpp_plugin_dir)
        cls.test_plugin_path = ":".join(config.vpp_test_plugin_dir)
        cls.extern_plugin_path = ":".join(config.extern_plugin_dir)
        debug_cli = ""
        if cls.step or cls.debug_gdb or cls.debug_gdbserver:
            # interactive debugging needs the CLI listener
            debug_cli = "cli-listen localhost:5002"
        # accept only a "<number>g"-style size; anything else means unlimited
        size = re.search(r"\d+[gG]", config.coredump_size)
        if size:
            coredump_size = f"coredump-size {config.coredump_size}".lower()
        else:
            coredump_size = "coredump-size unlimited"
        default_variant = config.variant
        if default_variant is not None:
            default_variant = "default { variant %s 100 }" % default_variant
        else:
            default_variant = ""

        api_fuzzing = config.api_fuzz
        if api_fuzzing is None:
            api_fuzzing = "off"

        # note: the cpu { section is left open here; it is closed below,
        # optionally after corelist-workers is appended
        cls.vpp_cmdline = [
            config.vpp,
            "unix",
            "{",
            "nodaemon",
            debug_cli,
            "full-coredump",
            coredump_size,
            "runtime-dir",
            cls.tempdir,
            "}",
            "api-trace",
            "{",
            "on",
            "}",
            "api-segment",
            "{",
            "prefix",
            cls.get_api_segment_prefix(),
            "}",
            "cpu",
            "{",
            "main-core",
            str(cls.cpus[0]),
        ]
        if cls.extern_plugin_path not in (None, ""):
            cls.extra_vpp_plugin_config.append("add-path %s" % cls.extern_plugin_path)
        if cls.get_vpp_worker_count():
            # workers get the remaining assigned cpus
            cls.vpp_cmdline.extend(
                ["corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])]
            )
        cls.vpp_cmdline.extend(
            [
                "}",
                "physmem",
                "{",
                "max-size",
                "32m",
                "}",
                "statseg",
                "{",
                "socket-name",
                cls.get_stats_sock_path(),
                cls.extra_vpp_statseg_config,
                "}",
                "socksvr",
                "{",
                "socket-name",
                cls.get_api_sock_path(),
                "}",
                "node { ",
                default_variant,
                "}",
                "api-fuzz {",
                api_fuzzing,
                "}",
                "plugins",
                "{",
                "plugin",
                "dpdk_plugin.so",
                "{",
                "disable",
                "}",
                "plugin",
                "rdma_plugin.so",
                "{",
                "disable",
                "}",
                "plugin",
                "lisp_unittest_plugin.so",
                "{",
                "enable",
                "}",
                "plugin",
                "unittest_plugin.so",
                "{",
                "enable",
                "}",
            ]
            + cls.extra_vpp_plugin_config
            + [
                "}",
            ]
        )

        if cls.extra_vpp_config is not None:
            cls.vpp_cmdline.extend(cls.extra_vpp_config)

        if not cls.debug_attach:
            # in attach mode there is no spawned vpp, so no cmdline to report
            cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
            cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
|
2016-10-28 13:20:27 +02:00
|
|
|
|
|
|
|
    @classmethod
    def wait_for_enter(cls):
        """In gdb/gdbserver debug modes, print attach instructions and block
        on ENTER so the user can attach a debugger before the test proceeds.
        In normal mode just log the spawned PID and return."""
        if cls.debug_gdbserver:
            print(double_line_delim)
            print("Spawned GDB server with PID: %d" % cls.vpp.pid)
        elif cls.debug_gdb:
            print(double_line_delim)
            print("Spawned VPP with PID: %d" % cls.vpp.pid)
        else:
            cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
            return
        print(single_line_delim)
        print("You can debug VPP using:")
        if cls.debug_gdbserver:
            print(
                f"sudo gdb {config.vpp} "
                f"-ex 'target remote localhost:{cls.gdbserver_port}'"
            )
            print(
                "Now is the time to attach gdb by running the above "
                "command, set up breakpoints etc., then resume VPP from "
                "within gdb by issuing the 'continue' command"
            )
            # bump the port so the next spawned gdbserver does not collide
            cls.gdbserver_port += 1
        elif cls.debug_gdb:
            print(f"sudo gdb {config.vpp} -ex 'attach {cls.vpp.pid}'")
            print(
                "Now is the time to attach gdb by running the above "
                "command and set up breakpoints etc., then resume VPP from"
                " within gdb by issuing the 'continue' command"
            )
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")
|
2016-10-28 13:20:27 +02:00
|
|
|
|
2021-03-16 12:52:12 +01:00
|
|
|
    @classmethod
    def attach_vpp(cls):
        """Attach to an externally started VPP instead of spawning one.

        DummyVpp presumably stands in for the subprocess.Popen object the
        framework otherwise stores in ``cls.vpp`` -- confirm against the
        DummyVpp definition.
        """
        cls.vpp = DummyVpp()
2016-10-28 13:20:27 +02:00
|
|
|
@classmethod
|
|
|
|
def run_vpp(cls):
|
2022-09-20 21:52:18 -04:00
|
|
|
if (
|
|
|
|
is_distro_ubuntu2204 == True and cls.has_tag(TestCaseTag.FIXME_UBUNTU2204)
|
|
|
|
) or (is_distro_debian11 == True and cls.has_tag(TestCaseTag.FIXME_DEBIAN11)):
|
|
|
|
return
|
2021-04-08 19:37:41 +02:00
|
|
|
cls.logger.debug(f"Assigned cpus: {cls.cpus}")
|
2016-10-28 13:20:27 +02:00
|
|
|
cmdline = cls.vpp_cmdline
|
|
|
|
|
|
|
|
if cls.debug_gdbserver:
|
2022-04-26 19:02:15 +02:00
|
|
|
gdbserver = "/usr/bin/gdbserver"
|
|
|
|
if not os.path.isfile(gdbserver) or not os.access(gdbserver, os.X_OK):
|
|
|
|
raise Exception(
|
|
|
|
"gdbserver binary '%s' does not exist or is "
|
|
|
|
"not executable" % gdbserver
|
|
|
|
)
|
|
|
|
|
|
|
|
cmdline = [
|
|
|
|
gdbserver,
|
|
|
|
"localhost:{port}".format(port=cls.gdbserver_port),
|
|
|
|
] + cls.vpp_cmdline
|
2016-10-28 13:20:27 +02:00
|
|
|
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
|
|
|
|
|
2016-11-03 05:36:01 +01:00
|
|
|
try:
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.vpp = subprocess.Popen(
|
|
|
|
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE
|
|
|
|
)
|
2018-11-24 21:19:38 -08:00
|
|
|
except subprocess.CalledProcessError as e:
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.logger.critical(
|
|
|
|
"Subprocess returned with non-0 return code: (%s)", e.returncode
|
|
|
|
)
|
2018-11-28 11:34:21 -08:00
|
|
|
raise
|
|
|
|
except OSError as e:
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.logger.critical(
|
|
|
|
"Subprocess returned with OS error: (%s) %s", e.errno, e.strerror
|
|
|
|
)
|
2018-11-28 11:34:21 -08:00
|
|
|
raise
|
|
|
|
except Exception as e:
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.logger.exception("Subprocess returned unexpected from %s:", cmdline)
|
2016-11-03 05:36:01 +01:00
|
|
|
raise
|
|
|
|
|
2016-10-28 13:20:27 +02:00
|
|
|
cls.wait_for_enter()
|
2016-10-07 16:30:03 +01:00
|
|
|
|
2019-04-09 20:19:51 -04:00
|
|
|
@classmethod
|
2019-10-31 08:34:22 +00:00
|
|
|
def wait_for_coredump(cls):
|
|
|
|
corefile = cls.tempdir + "/core"
|
2019-04-09 20:19:51 -04:00
|
|
|
if os.path.isfile(corefile):
|
2019-10-31 08:34:22 +00:00
|
|
|
cls.logger.error("Waiting for coredump to complete: %s", corefile)
|
2019-04-09 20:19:51 -04:00
|
|
|
curr_size = os.path.getsize(corefile)
|
2019-10-31 08:34:22 +00:00
|
|
|
deadline = time.time() + 60
|
2019-04-09 20:19:51 -04:00
|
|
|
ok = False
|
|
|
|
while time.time() < deadline:
|
|
|
|
cls.sleep(1)
|
|
|
|
size = curr_size
|
|
|
|
curr_size = os.path.getsize(corefile)
|
|
|
|
if size == curr_size:
|
|
|
|
ok = True
|
|
|
|
break
|
|
|
|
if not ok:
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.logger.error(
|
|
|
|
"Timed out waiting for coredump to complete: %s", corefile
|
|
|
|
)
|
2019-04-09 20:19:51 -04:00
|
|
|
else:
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.logger.error("Coredump complete: %s, size %d", corefile, curr_size)
|
2019-04-09 20:19:51 -04:00
|
|
|
|
2021-03-16 12:52:12 +01:00
|
|
|
@classmethod
|
|
|
|
def get_stats_sock_path(cls):
|
|
|
|
return "%s/stats.sock" % cls.tempdir
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def get_api_sock_path(cls):
|
|
|
|
return "%s/api.sock" % cls.tempdir
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def get_api_segment_prefix(cls):
|
|
|
|
return os.path.basename(cls.tempdir) # Only used for VAPI
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def get_tempdir(cls):
|
2022-03-10 11:47:45 +01:00
|
|
|
if cls.debug_attach:
|
|
|
|
tmpdir = f"{config.tmp_dir}/unittest-attach-gdb"
|
|
|
|
else:
|
|
|
|
tmpdir = f"{config.tmp_dir}/vpp-unittest-{cls.__name__}"
|
|
|
|
if config.wipe_tmp_dir:
|
|
|
|
shutil.rmtree(tmpdir, ignore_errors=True)
|
|
|
|
os.mkdir(tmpdir)
|
2021-05-31 16:08:53 +02:00
|
|
|
return tmpdir
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def create_file_handler(cls):
|
|
|
|
if config.log_dir is None:
|
|
|
|
cls.file_handler = FileHandler(f"{cls.tempdir}/log.txt")
|
|
|
|
return
|
|
|
|
|
|
|
|
logdir = f"{config.log_dir}/vpp-unittest-{cls.__name__}"
|
|
|
|
if config.wipe_tmp_dir:
|
|
|
|
shutil.rmtree(logdir, ignore_errors=True)
|
|
|
|
os.mkdir(logdir)
|
|
|
|
cls.file_handler = FileHandler(f"{logdir}/log.txt")
|
2021-03-16 12:52:12 +01:00
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
    @classmethod
    def setUpClass(cls):
        """
        Perform class setup before running the testcase
        Remove shared memory files, start vpp and connect the vpp-api
        """
        super(VppTestCase, cls).setUpClass()
        cls.logger = get_logger(cls.__name__)
        random.seed(config.rnd_seed)
        if hasattr(cls, "parallel_handler"):
            # in parallel runs, logs go through the parallel handler only
            cls.logger.addHandler(cls.parallel_handler)
            cls.logger.propagate = False
        cls.set_debug_flags(config.debug)
        cls.tempdir = cls.get_tempdir()
        cls.create_file_handler()
        cls.file_handler.setFormatter(
            Formatter(fmt="%(asctime)s,%(msecs)03d %(message)s", datefmt="%H:%M:%S")
        )
        cls.file_handler.setLevel(DEBUG)
        cls.logger.addHandler(cls.file_handler)
        cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__)
        os.chdir(cls.tempdir)
        cls.logger.info(
            "Temporary dir is %s, api socket is %s",
            cls.tempdir,
            cls.get_api_sock_path(),
        )
        cls.logger.debug("Random seed is %s", config.rnd_seed)
        cls.setUpConstants()
        cls.reset_packet_infos()
        # pcap bookkeeping used by register_pcap()/pg_start()
        cls._pcaps = []
        cls._old_pcaps = []
        cls.verbose = 0
        cls.vpp_dead = False
        cls.registry = VppObjectRegistry()
        cls.vpp_startup_failed = False
        cls.reporter = KeepAliveReporter()
        # need to catch exceptions here because if we raise, then the cleanup
        # doesn't get called and we might end with a zombie vpp
        try:
            if cls.debug_attach:
                cls.attach_vpp()
            else:
                cls.run_vpp()
            # run_vpp() may have skipped startup (FIXME-tagged testcase)
            if not hasattr(cls, "vpp"):
                return
            cls.reporter.send_keep_alive(cls, "setUpClass")
            VppTestResult.current_test_case_info = TestCaseInfo(
                cls.logger, cls.tempdir, cls.vpp.pid, config.vpp
            )
            cls.vpp_stdout_deque = deque()
            cls.vpp_stderr_deque = deque()
            # Pump thread in a non-debug-attached & not running-vpp
            if not cls.debug_attach and not hasattr(cls, "running_vpp"):
                cls.pump_thread_stop_flag = Event()
                cls.pump_thread_wakeup_pipe = os.pipe()
                cls.pump_thread = Thread(target=pump_output, args=(cls,))
                cls.pump_thread.daemon = True
                cls.pump_thread.start()
            if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach:
                # no API timeout while a human is driving gdb
                cls.vapi_response_timeout = 0
            cls.vapi = VppPapiProvider(cls.__name__, cls, cls.vapi_response_timeout)
            if cls.step:
                hook = hookmodule.StepHook(cls)
            else:
                hook = hookmodule.PollHook(cls)
            cls.vapi.register_hook(hook)
            cls.statistics = VPPStats(socketname=cls.get_stats_sock_path())
            try:
                hook.poll_vpp()
            except VppDiedError:
                cls.vpp_startup_failed = True
                cls.logger.critical(
                    "VPP died shortly after startup, check the"
                    " output to standard error for possible cause"
                )
                raise
            try:
                cls.vapi.connect()
            # NOTE(review): Exception already subsumes VPPIOError; the tuple
            # is redundant -- confirm before simplifying
            except (vpp_papi.VPPIOError, Exception) as e:
                cls.logger.debug("Exception connecting to vapi: %s" % e)
                cls.vapi.disconnect()

                if cls.debug_gdbserver:
                    print(
                        colorize(
                            "You're running VPP inside gdbserver but "
                            "VPP-API connection failed, did you forget "
                            "to 'continue' VPP from within gdb?",
                            RED,
                        )
                    )
                raise e
            if cls.debug_attach:
                # parse worker count from the last data line of "show thread"
                last_line = cls.vapi.cli("show thread").split("\n")[-2]
                cls.vpp_worker_count = int(last_line.split(" ")[0])
                print("Detected VPP with %s workers." % cls.vpp_worker_count)
        except vpp_papi.VPPRuntimeError as e:
            cls.logger.debug("%s" % e)
            cls.quit()
            raise e
        except Exception as e:
            cls.logger.debug("Exception connecting to VPP: %s" % e)
            cls.quit()
            raise e
|
|
|
2019-12-19 16:09:43 -05:00
|
|
|
@classmethod
|
|
|
|
def _debug_quit(cls):
|
2022-04-26 19:02:15 +02:00
|
|
|
if cls.debug_gdbserver or cls.debug_gdb:
|
2019-12-19 16:09:43 -05:00
|
|
|
try:
|
|
|
|
cls.vpp.poll()
|
|
|
|
|
|
|
|
if cls.vpp.returncode is None:
|
|
|
|
print()
|
|
|
|
print(double_line_delim)
|
|
|
|
print("VPP or GDB server is still running")
|
|
|
|
print(single_line_delim)
|
2022-04-26 19:02:15 +02:00
|
|
|
input(
|
|
|
|
"When done debugging, press ENTER to kill the "
|
|
|
|
"process and finish running the testcase..."
|
|
|
|
)
|
2019-12-19 16:09:43 -05:00
|
|
|
except AttributeError:
|
|
|
|
pass
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
    @classmethod
    def quit(cls):
        """
        Disconnect vpp-api, kill vpp and cleanup shared memory files
        """
        cls._debug_quit()
        if hasattr(cls, "running_vpp"):
            # externally managed VPP: ask it to quit instead of TERM/kill
            cls.vpp.quit_vpp()

        # first signal that we want to stop the pump thread, then wake it up
        if hasattr(cls, "pump_thread_stop_flag"):
            cls.pump_thread_stop_flag.set()
        if hasattr(cls, "pump_thread_wakeup_pipe"):
            os.write(cls.pump_thread_wakeup_pipe[1], b"ding dong wake up")
        if hasattr(cls, "pump_thread"):
            cls.logger.debug("Waiting for pump thread to stop")
            cls.pump_thread.join()
        if hasattr(cls, "vpp_stderr_reader_thread"):
            cls.logger.debug("Waiting for stderr pump to stop")
            cls.vpp_stderr_reader_thread.join()

        if hasattr(cls, "vpp"):
            if hasattr(cls, "vapi"):
                cls.logger.debug(cls.vapi.vpp.get_stats())
                cls.logger.debug("Disconnecting class vapi client on %s", cls.__name__)
                cls.vapi.disconnect()
                cls.logger.debug("Deleting class vapi attribute on %s", cls.__name__)
                del cls.vapi
            cls.vpp.poll()
            if not cls.debug_attach and cls.vpp.returncode is None:
                # let a crashing VPP finish writing its core before TERM
                cls.wait_for_coredump()
                cls.logger.debug("Sending TERM to vpp")
                cls.vpp.terminate()
                cls.logger.debug("Waiting for vpp to die")
                try:
                    outs, errs = cls.vpp.communicate(timeout=5)
                except subprocess.TimeoutExpired:
                    # did not exit within 5s - force it
                    cls.vpp.kill()
                    outs, errs = cls.vpp.communicate()
            cls.logger.debug("Deleting class vpp attribute on %s", cls.__name__)
            if not cls.debug_attach and not hasattr(cls, "running_vpp"):
                cls.vpp.stdout.close()
                cls.vpp.stderr.close()
            # If vpp is a dynamic attribute set by the func use_running,
            # deletion will result in an AttributeError that we can
            # safely pass.
            try:
                del cls.vpp
            except AttributeError:
                pass

        # stderr gets critical level only when startup failed, to surface
        # the cause prominently in the log
        if cls.vpp_startup_failed:
            stdout_log = cls.logger.info
            stderr_log = cls.logger.critical
        else:
            stdout_log = cls.logger.info
            stderr_log = cls.logger.info

        if hasattr(cls, "vpp_stdout_deque"):
            stdout_log(single_line_delim)
            stdout_log("VPP output to stdout while running %s:", cls.__name__)
            stdout_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stdout_deque)
            with open(cls.tempdir + "/vpp_stdout.txt", "w") as f:
                f.write(vpp_output)
            stdout_log("\n%s", vpp_output)
            stdout_log(single_line_delim)

        if hasattr(cls, "vpp_stderr_deque"):
            stderr_log(single_line_delim)
            stderr_log("VPP output to stderr while running %s:", cls.__name__)
            stderr_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stderr_deque)
            with open(cls.tempdir + "/vpp_stderr.txt", "w") as f:
                f.write(vpp_output)
            stderr_log("\n%s", vpp_output)
            stderr_log(single_line_delim)
|
|
2016-10-03 19:44:57 +02:00
|
|
|
@classmethod
|
|
|
|
def tearDownClass(cls):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""Perform final cleanup after running all tests in this test-case"""
|
|
|
|
cls.logger.debug("--- tearDownClass() for %s called ---" % cls.__name__)
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
return
|
2022-04-26 19:02:15 +02:00
|
|
|
cls.reporter.send_keep_alive(cls, "tearDownClass")
|
2016-10-03 19:44:57 +02:00
|
|
|
cls.quit()
|
2017-04-11 06:01:53 +02:00
|
|
|
cls.file_handler.close()
|
2018-02-17 13:41:33 +01:00
|
|
|
cls.reset_packet_infos()
|
2021-05-31 16:08:53 +02:00
|
|
|
if config.debug_framework:
|
2018-02-17 13:41:33 +01:00
|
|
|
debug_internal.on_tear_down_class(cls)
|
2016-10-03 19:44:57 +02:00
|
|
|
|
2019-03-13 09:23:05 -07:00
|
|
|
def show_commands_at_teardown(self):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""Allow subclass specific teardown logging additions."""
|
2019-03-13 09:23:05 -07:00
|
|
|
self.logger.info("--- No test specific show commands provided. ---")
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
    def tearDown(self):
        """Show various debug prints after each test"""
        self.logger.debug(
            "--- tearDown() for %s.%s(%s) called ---"
            % (self.__class__.__name__, self._testMethodName, self._testMethodDoc)
        )
        # startup was skipped for this testcase - nothing to inspect
        if not hasattr(self, "vpp"):
            return

        try:
            if not self.vpp_dead:
                self.logger.debug(self.vapi.cli("show trace max 1000"))
                self.logger.info(self.vapi.ppcli("show interface"))
                self.logger.info(self.vapi.ppcli("show hardware"))
                self.logger.info(self.statistics.set_errors_str())
                self.logger.info(self.vapi.ppcli("show run"))
                self.logger.info(self.vapi.ppcli("show log"))
                self.logger.info(self.vapi.ppcli("show bihash"))
                self.logger.info("Logging testcase specific show commands.")
                self.show_commands_at_teardown()
                if self.remove_configured_vpp_objects_on_tear_down:
                    self.registry.remove_vpp_config(self.logger)
            # Save/Dump VPP api trace log
            m = self._testMethodName
            api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
            tmp_api_trace = "/tmp/%s" % api_trace
            vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
            self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
            self.logger.info("Moving %s to %s\n" % (tmp_api_trace, vpp_api_trace_log))
            shutil.move(tmp_api_trace, vpp_api_trace_log)
        except VppTransportSocketIOError:
            # API socket broke mid-teardown: VPP died, remember that so
            # setUp() of the next test fails fast
            self.logger.debug(
                "VppTransportSocketIOError: Vpp dead. Cannot log show commands."
            )
            self.vpp_dead = True
        else:
            self.registry.unregister_all(self.logger)
|
|
2016-10-03 19:44:57 +02:00
|
|
|
    def setUp(self):
        """Clear trace before running each test"""
        super(VppTestCase, self).setUp()
        # startup was skipped for this testcase - nothing to prepare
        if not hasattr(self, "vpp"):
            return
        self.reporter.send_keep_alive(self)
        if self.vpp_dead:
            # a previous test killed VPP - fail fast instead of hanging
            raise VppDiedError(
                rv=None,
                testcase=self.__class__.__name__,
                method_name=self._testMethodName,
            )
        self.sleep(0.1, "during setUp")
        # mark test boundaries in the captured VPP stdout/stderr streams
        self.vpp_stdout_deque.append(
            "--- test setUp() for %s.%s(%s) starts here ---\n"
            % (self.__class__.__name__, self._testMethodName, self._testMethodDoc)
        )
        self.vpp_stderr_deque.append(
            "--- test setUp() for %s.%s(%s) starts here ---\n"
            % (self.__class__.__name__, self._testMethodName, self._testMethodDoc)
        )
        self.vapi.cli("clear trace")
        # store the test instance inside the test class - so that objects
        # holding the class can access instance methods (like assertEqual)
        type(self).test_instance = self
|
|
|
|
|
|
@classmethod
|
2017-09-20 08:26:30 +02:00
|
|
|
def pg_enable_capture(cls, interfaces=None):
|
2016-10-11 11:47:09 +02:00
|
|
|
"""
|
|
|
|
Enable capture on packet-generator interfaces
|
|
|
|
|
2017-09-20 08:26:30 +02:00
|
|
|
:param interfaces: iterable interface indexes (if None,
|
|
|
|
use self.pg_interfaces)
|
2016-10-11 11:47:09 +02:00
|
|
|
|
|
|
|
"""
|
2017-09-20 08:26:30 +02:00
|
|
|
if interfaces is None:
|
|
|
|
interfaces = cls.pg_interfaces
|
2016-10-11 11:47:09 +02:00
|
|
|
for i in interfaces:
|
|
|
|
i.enable_capture()
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
@classmethod
|
2021-04-01 18:19:29 +02:00
|
|
|
def register_pcap(cls, intf, worker):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""Register a pcap in the testclass"""
|
2016-12-12 08:36:58 +01:00
|
|
|
# add to the list of captures with current timestamp
|
2021-04-01 18:19:29 +02:00
|
|
|
cls._pcaps.append((intf, worker))
|
2016-12-12 08:36:58 +01:00
|
|
|
|
2019-10-13 18:56:03 +00:00
|
|
|
@classmethod
|
|
|
|
def get_vpp_time(cls):
|
2020-03-11 10:31:36 -04:00
|
|
|
# processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
|
|
|
|
# returns float("2.190522")
|
2022-04-26 19:02:15 +02:00
|
|
|
timestr = cls.vapi.cli("show clock")
|
|
|
|
head, sep, tail = timestr.partition(",")
|
|
|
|
head, sep, tail = head.partition("Time now")
|
2020-03-11 10:31:36 -04:00
|
|
|
return float(tail)
|
2019-10-13 18:56:03 +00:00
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def sleep_on_vpp_time(cls, sec):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""Sleep according to time in VPP world"""
|
2019-10-13 18:56:03 +00:00
|
|
|
# On a busy system with many processes
|
|
|
|
# we might end up with VPP time being slower than real world
|
|
|
|
# So take that into account when waiting for VPP to do something
|
|
|
|
start_time = cls.get_vpp_time()
|
|
|
|
while cls.get_vpp_time() - start_time < sec:
|
|
|
|
cls.sleep(0.1)
|
|
|
|
|
2016-12-12 08:36:58 +01:00
|
|
|
    @classmethod
    def pg_start(cls, trace=True):
        """Enable the PG, wait till it is done, then clean up

        :param trace: when True, clear the trace and arm pg-input tracing
            before enabling the generator.
        """
        # rotate input pcaps left over from the previous pg_start() run
        for intf, worker in cls._old_pcaps:
            intf.handle_old_pcap_file(intf.get_in_path(worker), intf.in_history_counter)
        cls._old_pcaps = []
        if trace:
            cls.vapi.cli("clear trace")
            cls.vapi.cli("trace add pg-input 1000")
        cls.vapi.cli("packet-generator enable")
        # PG, when starts, runs to completion -
        # so let's avoid a race condition,
        # and wait a little till it's done.
        # Then clean it up - and then be gone.
        deadline = time.time() + 300
        while cls.vapi.cli("show packet-generator").find("Yes") != -1:
            cls.sleep(0.01)  # yield
            if time.time() > deadline:
                cls.logger.error("Timeout waiting for pg to stop")
                break
        for intf, worker in cls._pcaps:
            cls.vapi.cli("packet-generator delete %s" % intf.get_cap_name(worker))
        # current captures become "old" for the next run's rotation
        cls._old_pcaps = cls._pcaps
        cls._pcaps = []
2016-10-03 19:44:57 +02:00
|
|
|
|
|
|
|
@classmethod
|
2022-04-26 19:02:15 +02:00
|
|
|
def create_pg_interfaces_internal(cls, interfaces, gso=0, gso_size=0, mode=None):
|
2016-10-11 11:47:09 +02:00
|
|
|
"""
|
2016-12-22 11:06:56 +01:00
|
|
|
Create packet-generator interfaces.
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2016-12-22 11:06:56 +01:00
|
|
|
:param interfaces: iterable indexes of the interfaces.
|
|
|
|
:returns: List of created interfaces.
|
2016-10-11 11:47:09 +02:00
|
|
|
|
|
|
|
"""
|
|
|
|
result = []
|
|
|
|
for i in interfaces:
|
2021-06-03 14:43:21 +00:00
|
|
|
intf = VppPGInterface(cls, i, gso, gso_size, mode)
|
2016-10-11 11:47:09 +02:00
|
|
|
setattr(cls, intf.name, intf)
|
|
|
|
result.append(intf)
|
|
|
|
cls.pg_interfaces = result
|
|
|
|
return result
|
|
|
|
|
2021-06-03 14:43:21 +00:00
|
|
|
@classmethod
|
|
|
|
def create_pg_ip4_interfaces(cls, interfaces, gso=0, gso_size=0):
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
cls.pg_interfaces = []
|
|
|
|
return cls.pg_interfaces
|
2021-06-03 14:43:21 +00:00
|
|
|
pgmode = VppEnum.vl_api_pg_interface_mode_t
|
2022-04-26 19:02:15 +02:00
|
|
|
return cls.create_pg_interfaces_internal(
|
|
|
|
interfaces, gso, gso_size, pgmode.PG_API_MODE_IP4
|
|
|
|
)
|
2021-06-03 14:43:21 +00:00
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def create_pg_ip6_interfaces(cls, interfaces, gso=0, gso_size=0):
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
cls.pg_interfaces = []
|
|
|
|
return cls.pg_interfaces
|
2021-06-03 14:43:21 +00:00
|
|
|
pgmode = VppEnum.vl_api_pg_interface_mode_t
|
2022-04-26 19:02:15 +02:00
|
|
|
return cls.create_pg_interfaces_internal(
|
|
|
|
interfaces, gso, gso_size, pgmode.PG_API_MODE_IP6
|
|
|
|
)
|
2021-06-03 14:43:21 +00:00
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
cls.pg_interfaces = []
|
|
|
|
return cls.pg_interfaces
|
2021-06-03 14:43:21 +00:00
|
|
|
pgmode = VppEnum.vl_api_pg_interface_mode_t
|
2022-04-26 19:02:15 +02:00
|
|
|
return cls.create_pg_interfaces_internal(
|
|
|
|
interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET
|
|
|
|
)
|
2021-06-03 14:43:21 +00:00
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def create_pg_ethernet_interfaces(cls, interfaces, gso=0, gso_size=0):
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
cls.pg_interfaces = []
|
|
|
|
return cls.pg_interfaces
|
2021-06-03 14:43:21 +00:00
|
|
|
pgmode = VppEnum.vl_api_pg_interface_mode_t
|
2022-04-26 19:02:15 +02:00
|
|
|
return cls.create_pg_interfaces_internal(
|
|
|
|
interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET
|
|
|
|
)
|
2021-06-03 14:43:21 +00:00
|
|
|
|
2016-11-04 11:11:44 +01:00
|
|
|
@classmethod
|
2018-06-24 22:49:33 +02:00
|
|
|
def create_loopback_interfaces(cls, count):
|
2016-11-04 11:11:44 +01:00
|
|
|
"""
|
2016-12-22 11:06:56 +01:00
|
|
|
Create loopback interfaces.
|
2016-11-04 11:11:44 +01:00
|
|
|
|
2018-06-24 22:49:33 +02:00
|
|
|
:param count: number of interfaces created.
|
2016-12-22 11:06:56 +01:00
|
|
|
:returns: List of created interfaces.
|
2016-11-04 11:11:44 +01:00
|
|
|
"""
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
cls.lo_interfaces = []
|
|
|
|
return cls.lo_interfaces
|
2018-06-24 22:49:33 +02:00
|
|
|
result = [VppLoInterface(cls) for i in range(count)]
|
|
|
|
for intf in result:
|
2016-11-04 11:11:44 +01:00
|
|
|
setattr(cls, intf.name, intf)
|
|
|
|
cls.lo_interfaces = result
|
|
|
|
return result
|
|
|
|
|
2019-03-15 02:16:20 -07:00
|
|
|
@classmethod
|
|
|
|
def create_bvi_interfaces(cls, count):
|
|
|
|
"""
|
|
|
|
Create BVI interfaces.
|
|
|
|
|
|
|
|
:param count: number of interfaces created.
|
|
|
|
:returns: List of created interfaces.
|
|
|
|
"""
|
2022-09-20 21:52:18 -04:00
|
|
|
if not hasattr(cls, "vpp"):
|
|
|
|
cls.bvi_interfaces = []
|
|
|
|
return cls.bvi_interfaces
|
2019-03-15 02:16:20 -07:00
|
|
|
result = [VppBviInterface(cls) for i in range(count)]
|
|
|
|
for intf in result:
|
|
|
|
setattr(cls, intf.name, intf)
|
|
|
|
cls.bvi_interfaces = result
|
|
|
|
return result
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
@staticmethod
|
2022-04-26 19:02:15 +02:00
|
|
|
def extend_packet(packet, size, padding=" "):
|
2016-10-11 11:47:09 +02:00
|
|
|
"""
|
2017-09-20 08:26:30 +02:00
|
|
|
Extend packet to given size by padding with spaces or custom padding
|
2016-10-11 11:47:09 +02:00
|
|
|
NOTE: Currently works only when Raw layer is present.
|
|
|
|
|
|
|
|
:param packet: packet
|
|
|
|
:param size: target size
|
2017-09-20 08:26:30 +02:00
|
|
|
:param padding: padding used to extend the payload
|
2016-10-11 11:47:09 +02:00
|
|
|
|
|
|
|
"""
|
2016-10-03 19:44:57 +02:00
|
|
|
packet_len = len(packet) + 4
|
|
|
|
extend = size - packet_len
|
|
|
|
if extend > 0:
|
2019-05-07 10:39:57 +02:00
|
|
|
num = (extend // len(padding)) + 1
|
|
|
|
packet[Raw].load += (padding * num)[:extend].encode("ascii")
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2016-12-21 08:50:14 +01:00
|
|
|
@classmethod
|
|
|
|
def reset_packet_infos(cls):
|
2022-04-26 19:02:15 +02:00
|
|
|
"""Reset the list of packet info objects and packet counts to zero"""
|
2016-12-21 08:50:14 +01:00
|
|
|
cls._packet_infos = {}
|
|
|
|
cls._packet_count_for_dst_if_idx = {}
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2016-12-21 08:50:14 +01:00
|
|
|
@classmethod
def create_packet_info(cls, src_if, dst_if):
    """
    Create packet info object containing the source and destination indexes
    and add it to the testcase's packet info list

    :param VppInterface src_if: source interface
    :param VppInterface dst_if: destination interface

    :returns: _PacketInfo object
    """
    info = _PacketInfo()
    info.index = len(cls._packet_infos)
    info.src = src_if.sw_if_index
    info.dst = dst_if.sw_if_index
    # Per-destination packet counts are tracked on the parent interface
    # when the destination is a sub-interface.
    if isinstance(dst_if, VppSubInterface):
        dst_idx = dst_if.parent.sw_if_index
    else:
        dst_idx = dst_if.sw_if_index
    cls._packet_count_for_dst_if_idx[dst_idx] = (
        cls._packet_count_for_dst_if_idx.get(dst_idx, 0) + 1
    )
    cls._packet_infos[info.index] = info
    return info
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
@staticmethod
def info_to_payload(info):
    """
    Convert _PacketInfo object to packet payload

    :param info: _PacketInfo object

    :returns: string containing serialized data from packet info
    """
    # Serialized layout: 4 x 32-bit int + 1 x 16-bit short = 18 bytes.
    fields = (info.index, info.src, info.dst, info.ip, info.proto)
    return pack("iiiih", *fields)
|
2016-10-03 19:44:57 +02:00
|
|
|
|
|
|
|
@staticmethod
def payload_to_info(payload, payload_field="load"):
    """
    Convert packet payload to _PacketInfo object

    :param payload: packet payload
    :type payload: <class 'scapy.packet.Raw'>
    :param payload_field: packet fieldname of payload "load" for
        <class 'scapy.packet.Raw'>
    :type payload_field: str
    :returns: _PacketInfo object containing de-serialized data from payload
    """
    # Only the first 18 bytes carry serialized info
    # (4 x 32-bit int + 1 x 16-bit short).
    raw_bytes = getattr(payload, payload_field)[:18]

    info = _PacketInfo()
    (info.index, info.src, info.dst, info.ip, info.proto) = unpack(
        "iiiih", raw_bytes
    )
    # some SRv6 TCs depend on get an exception if bad values are detected
    if info.index > 0x4000:
        raise ValueError("Index value is invalid")
    return info
|
2016-10-11 11:47:09 +02:00
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
def get_next_packet_info(self, info):
    """
    Iterate over the packet info list stored in the testcase
    Start iteration with first element if info is None
    Continue based on index in info if info is specified

    :param info: info or None
    :returns: next info in list or None if no more infos
    """
    next_index = 0 if info is None else info.index + 1
    # Indexes are assigned densely, so reaching len() means exhaustion.
    if next_index == len(self._packet_infos):
        return None
    return self._packet_infos[next_index]
|
2016-10-11 11:47:09 +02:00
|
|
|
|
|
|
|
def get_next_packet_info_for_interface(self, src_index, info):
    """
    Search the packet info list for the next packet info with same source
    interface index

    :param src_index: source interface index to search for
    :param info: packet info - where to start the search
    :returns: packet info or None
    """
    candidate = self.get_next_packet_info(info)
    while candidate is not None:
        if candidate.src == src_index:
            return candidate
        candidate = self.get_next_packet_info(candidate)
    return None
|
2016-10-11 11:47:09 +02:00
|
|
|
|
|
|
|
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
    """
    Search the packet info list for the next packet info with same source
    and destination interface indexes

    :param src_index: source interface index to search for
    :param dst_index: destination interface index to search for
    :param info: packet info - where to start the search
    :returns: packet info or None
    """
    candidate = self.get_next_packet_info_for_interface(src_index, info)
    while candidate is not None:
        if candidate.dst == dst_index:
            return candidate
        candidate = self.get_next_packet_info_for_interface(src_index, candidate)
    return None
|
|
|
|
|
2016-09-29 14:43:44 +02:00
|
|
|
def assert_equal(self, real_value, expected_value, name_or_class=None):
    """Assert equality with a descriptive message.

    :param real_value: value obtained by the test
    :param expected_value: value the test expects
    :param name_or_class: optional plain label, or an enum-like class
        whose docstring and constructor are used to pretty-print values
    """
    if name_or_class is None:
        self.assertEqual(real_value, expected_value)
        return
    try:
        # Treat name_or_class as an enum-like class: label from its
        # docstring, values rendered through its constructor.
        template = "Invalid %s: %d('%s') does not match expected value %d('%s')"
        message = template % (
            getdoc(name_or_class).strip(),
            real_value,
            str(name_or_class(real_value)),
            expected_value,
            str(name_or_class(expected_value)),
        )
    except Exception:
        # Fall back to a plain-text label.
        message = "Invalid %s: %s does not match expected value %s" % (
            name_or_class,
            real_value,
            expected_value,
        )
    self.assertEqual(real_value, expected_value, message)
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def assert_in_range(self, real_value, expected_min, expected_max, name=None):
    """Assert that expected_min <= real_value <= expected_max.

    :param real_value: value obtained by the test
    :param expected_min: inclusive lower bound
    :param expected_max: inclusive upper bound
    :param name: optional label used in the failure message
    """
    if name is None:
        message = None
    else:
        message = "Invalid %s: %s out of range <%s,%s>" % (
            name,
            real_value,
            expected_min,
            expected_max,
        )
    self.assertTrue(expected_min <= real_value <= expected_max, message)
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def assert_packet_checksums_valid(self, packet, ignore_zero_udp_checksums=True):
    """Verify all checksum fields present in the packet.

    The packet is re-parsed, its checksum fields are deleted, and scapy
    is left to recompute them; recomputed values must match the received
    ones.

    :param packet: packet to verify
    :param ignore_zero_udp_checksums: if True, zero UDP checksums are
        not verified (zero means "checksum not set" for UDP over IPv4)
    """
    received = packet.__class__(scapy.compat.raw(packet))
    udp_layers = ["UDP", "UDPerror"]
    checksum_fields = ["cksum", "chksum"]
    checksums = []
    layer_index = 0
    temp = received.__class__(scapy.compat.raw(received))
    # Walk every layer and strip its checksum fields so scapy will
    # recompute them when the packet is re-serialized.
    while True:
        layer = temp.getlayer(layer_index)
        if not layer:
            break
        # work on a payload-less copy so hasattr/getattr see only
        # this layer's own fields
        layer = layer.copy()
        layer.remove_payload()
        for field in checksum_fields:
            if hasattr(layer, field):
                if (
                    ignore_zero_udp_checksums
                    and 0 == getattr(layer, field)
                    and layer.name in udp_layers
                ):
                    continue
                delattr(temp.getlayer(layer_index), field)
                checksums.append((layer_index, field))
        layer_index = layer_index + 1
    if 0 == len(checksums):
        return
    # Re-parse to force checksum recalculation.
    temp = temp.__class__(scapy.compat.raw(temp))
    for layer, field in reversed(checksums):
        calc_sum = getattr(temp[layer], field)
        self.assert_equal(
            getattr(received[layer], field),
            calc_sum,
            "packet checksum on layer #%d: %s" % (layer, temp[layer].name),
        )
        self.logger.debug(
            "Checksum field `%s` on `%s` layer has correct value `%s`"
            % (field, temp[layer].name, calc_sum)
        )
|
|
|
|
|
|
|
|
def assert_checksum_valid(
    self,
    received_packet,
    layer,
    checksum_field_names=("chksum", "cksum"),
    ignore_zero_checksum=False,
):
    """Check checksum of received packet on given layer

    The checksum field is deleted and the packet re-parsed so scapy
    recomputes it; the recomputed value must equal the received one.

    :param received_packet: packet to verify
    :param layer: name of the layer to verify (e.g. "IP", "UDP")
    :param checksum_field_names: candidate names of the checksum field
        (fix: default changed from a list to a tuple -- a mutable
        default argument is shared between calls, a well-known Python
        pitfall; iteration behavior is unchanged)
    :param ignore_zero_checksum: skip verification when the received
        checksum is 0
    :raises Exception: if the layer carries none of the checksum fields
    """
    layer_copy = received_packet[layer].copy()
    layer_copy.remove_payload()
    field_name = None
    for f in checksum_field_names:
        if hasattr(layer_copy, f):
            field_name = f
            break
    if field_name is None:
        raise Exception(
            f"Layer `{layer}` has none of checksum fields: `{checksum_field_names}`."
        )
    received_packet_checksum = getattr(received_packet[layer], field_name)
    if ignore_zero_checksum and 0 == received_packet_checksum:
        return
    # Re-serialize and re-parse with the checksum field deleted so that
    # scapy computes a fresh value to compare against.
    recalculated = received_packet.__class__(scapy.compat.raw(received_packet))
    delattr(recalculated[layer], field_name)
    recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
    self.assert_equal(
        received_packet_checksum,
        getattr(recalculated[layer], field_name),
        f"packet checksum (field: {field_name}) on layer: %s" % layer,
    )
|
|
|
|
|
|
|
|
def assert_ip_checksum_valid(self, received_packet, ignore_zero_checksum=False):
    """Verify the IP header checksum of the received packet."""
    self.assert_checksum_valid(
        received_packet, "IP", ignore_zero_checksum=ignore_zero_checksum
    )
|
|
|
|
|
|
|
|
def assert_tcp_checksum_valid(self, received_packet, ignore_zero_checksum=False):
    """Verify the TCP checksum of the received packet."""
    self.assert_checksum_valid(
        received_packet, "TCP", ignore_zero_checksum=ignore_zero_checksum
    )
|
|
|
|
|
|
|
|
def assert_udp_checksum_valid(self, received_packet, ignore_zero_checksum=True):
    """Verify the UDP checksum; zero checksums are skipped by default
    (zero means "checksum not set" for UDP over IPv4)."""
    self.assert_checksum_valid(
        received_packet, "UDP", ignore_zero_checksum=ignore_zero_checksum
    )
|
2018-05-16 10:52:54 +02:00
|
|
|
|
|
|
|
def assert_embedded_icmp_checksum_valid(self, received_packet):
    """Verify checksums of packet layers embedded inside an ICMP error."""
    # Zero UDP checksum means "not set", hence it is ignored for the
    # embedded UDP layer.
    embedded_checks = (
        (IPerror, "IPerror", False),
        (TCPerror, "TCPerror", False),
        (UDPerror, "UDPerror", True),
        (ICMPerror, "ICMPerror", False),
    )
    for layer_cls, layer_name, ignore_zero in embedded_checks:
        if received_packet.haslayer(layer_cls):
            self.assert_checksum_valid(
                received_packet, layer_name, ignore_zero_checksum=ignore_zero
            )
|
2018-05-16 10:52:54 +02:00
|
|
|
|
|
|
|
def assert_icmp_checksum_valid(self, received_packet):
    """Verify the ICMP checksum and the checksums of embedded layers."""
    self.assert_checksum_valid(received_packet, "ICMP")
    self.assert_embedded_icmp_checksum_valid(received_packet)
|
|
|
|
|
|
|
|
def assert_icmpv6_checksum_valid(self, pkt):
    """Verify checksums of ICMPv6 layers present in the packet."""
    if pkt.haslayer(ICMPv6DestUnreach):
        self.assert_checksum_valid(pkt, "ICMPv6DestUnreach")
        # destination-unreachable carries the offending packet, so the
        # embedded layers' checksums are verified as well
        self.assert_embedded_icmp_checksum_valid(pkt)
    if pkt.haslayer(ICMPv6EchoRequest):
        self.assert_checksum_valid(pkt, "ICMPv6EchoRequest")
    if pkt.haslayer(ICMPv6EchoReply):
        self.assert_checksum_valid(pkt, "ICMPv6EchoReply")
|
2018-05-16 10:52:54 +02:00
|
|
|
|
2022-02-18 10:32:08 +00:00
|
|
|
def get_counter(self, counter):
    """Return a counter value.

    A name starting with "/" is read from the stats segment; any other
    name is looked up in the "show errors" CLI output.

    :param counter: counter name
    :returns: counter value (stats-segment object, or int for CLI
        counters; 0 when the CLI counter is not found)
    """
    if counter.startswith("/"):
        return self.statistics.get_counter(counter)
    # Parse CLI output; skip the header (first) and trailer (last) lines.
    lines = self.vapi.cli("sh errors").split("\n")
    value = 0
    for line in lines[1:-1]:
        columns = line.split()
        if columns[1] == counter:
            value = int(columns[0])
            break
    return value
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def assert_counter_equal(self, counter, expected_value, thread=None, index=0):
    """Assert that a counter has the expected value.

    :param counter: counter name (see get_counter)
    :param expected_value: expected value
    :param thread: if given, check only this thread's value; otherwise
        sum the value over all threads
    :param index: index within each per-thread counter array
    """
    value = self.get_counter(counter)
    if thread is not None:
        value = value[thread][index]
    else:
        value = sum(entry[index] for entry in value)
    self.logger.debug(
        "validate counter `%s[%s]', expected: %s, real value: %s"
        % (counter, index, expected_value, value)
    )
    self.assert_equal(value, expected_value, "counter `%s[%s]'" % (counter, index))
|
2022-02-18 10:32:08 +00:00
|
|
|
|
2018-11-08 11:24:34 +01:00
|
|
|
def assert_packet_counter_equal(self, counter, expected_value):
    """Assert that a packet counter has the expected value."""
    actual = self.get_counter(counter)
    self.assert_equal(actual, expected_value, "packet counter `%s'" % counter)
|
2018-11-08 11:24:34 +01:00
|
|
|
|
2019-05-16 15:01:34 +02:00
|
|
|
def assert_error_counter_equal(self, counter, expected_value):
    """Assert that an error counter, summed over all threads, matches."""
    actual = self.statistics[counter].sum()
    self.assert_equal(actual, expected_value, "error counter `%s'" % counter)
|
2019-05-16 15:01:34 +02:00
|
|
|
|
2017-02-14 02:55:31 +01:00
|
|
|
@classmethod
def sleep(cls, timeout, remark=None):
    """Sleep for the given duration, logging start/end, and log an error
    when the actual sleep took much longer than requested.

    :param timeout: duration in seconds; 0 only yields the CPU
    :param remark: optional text included in log messages
    """
    # /* Allow sleep(0) to maintain win32 semantics, and as decreed
    #  * by Guido, only the main thread can be interrupted.
    # */
    # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892  # noqa
    if timeout == 0:
        # yield quantum
        if hasattr(os, "sched_yield"):
            os.sched_yield()
        else:
            time.sleep(0)
        return

    cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
    started = time.time()
    time.sleep(timeout)
    elapsed = time.time() - started
    if elapsed > 2 * timeout:
        cls.logger.error(
            "unexpected self.sleep() result - slept for %es instead of ~%es!",
            elapsed,
            timeout,
        )

    cls.logger.debug(
        "Finished sleep (%s) - slept %es (wanted %es)",
        remark,
        elapsed,
        timeout,
    )
|
2017-02-02 06:58:07 +01:00
|
|
|
|
2021-08-20 09:18:31 +02:00
|
|
|
def virtual_sleep(self, timeout, remark=None):
    """Advance VPP's virtual clock instead of sleeping in real time.

    :param timeout: number of virtual seconds to move the clock by
    :param remark: optional text included in the log message
    """
    self.logger.debug("Moving VPP time by %s (%s)", timeout, remark)
    self.vapi.cli("set clock adjust %s" % timeout)
|
|
|
|
|
2021-02-19 16:39:13 +01:00
|
|
|
def pg_send(self, intf, pkts, worker=None, trace=True):
    """Enqueue packets on an interface and start packet generation.

    :param intf: packet-generator interface to send from
    :param pkts: packets to send
    :param worker: worker thread index to assign the stream to
    :param trace: enable packet tracing
    """
    intf.add_stream(pkts, worker=worker)
    # Arm capture on every pg interface before starting generation so
    # no replies are missed.
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start(trace=trace)
|
2019-02-20 09:01:14 -08:00
|
|
|
|
tests: add enhanced packet counter verification
Add support for inline packet counter verification to send_and_* functions.
Diff dictionary is a dictionary of dictionaries of interesting stats:
diff_dictionary =
{
"err" : { '/error/counter1' : 4, },
sw_if_index1 : { '/stat/segment/counter1' : 5,
'/stat/segment/counter2' : 6,
},
sw_if_index2 : { '/stat/segment/counter1' : 7,
},
}
It describes a per sw-if-index diffset, where each key is stat segment
path and value is the expected change for that counter for sw-if-index.
Special case string "err" is used for error counters.
This then allows more precise packet counter verification by first
defining a "zero" dictionary, e.g. for ED NAT:
cls.no_diff = StatsDiff({
pg.sw_if_index: {
'/nat44-ed/in2out/fastpath/tcp': 0,
'/nat44-ed/in2out/fastpath/udp': 0,
'/nat44-ed/in2out/fastpath/icmp': 0,
'/nat44-ed/in2out/fastpath/drops': 0,
'/nat44-ed/in2out/slowpath/tcp': 0,
'/nat44-ed/in2out/slowpath/udp': 0,
'/nat44-ed/in2out/slowpath/icmp': 0,
'/nat44-ed/in2out/slowpath/drops': 0,
'/nat44-ed/in2out/fastpath/tcp': 0,
'/nat44-ed/in2out/fastpath/udp': 0,
'/nat44-ed/in2out/fastpath/icmp': 0,
'/nat44-ed/in2out/fastpath/drops': 0,
'/nat44-ed/in2out/slowpath/tcp': 0,
'/nat44-ed/in2out/slowpath/udp': 0,
'/nat44-ed/in2out/slowpath/icmp': 0,
'/nat44-ed/in2out/slowpath/drops': 0,
}
for pg in cls.pg_interfaces
})
and then to specify only changed counters directly when calling
one of send_and_* functions:
self.send_and_assert_no_replies(
self.pg0, pkts, msg="i2o pkts",
stats_diff=self.no_diff | {
"err": {
'/err/nat44-ed-in2out-slowpath/out of ports': len(pkts),
},
self.pg0.sw_if_index: {
'/nat44-ed/in2out/slowpath/drops': len(pkts),
},
}
)
operator | is overloaded by StatsDiff class to perform a deep merge operation,
so in above case, dictionaries for "err" and self.pg0.sw_if_index do not
overwrite whole sub-dictionaries, rather the contents are merged,
assuring that all the remaining counters are verified to be zero.
Type: improvement
Signed-off-by: Klement Sekera <klement.sekera@gmail.com>
Change-Id: I2b87f7bd58a7d4b34ee72344e2f871b2f372e2d9
2022-02-18 10:34:35 +00:00
|
|
|
def snapshot_stats(self, stats_diff):
    """Return snapshot of interesting stats based on diff dictionary.

    :param stats_diff: dict of dicts - per sw_if_index (or "err")
        mapping of counter paths to expected diffs; only the counter
        names are used here
    :returns: dict mapping each referenced counter path to its current
        statistics value
    """
    stats_snapshot = {
        counter: self.statistics[counter]
        for per_index_counters in stats_diff.values()
        for counter in per_index_counters
    }
    self.logger.debug(f"Took statistics stats_snapshot: {stats_snapshot}")
    return stats_snapshot
|
|
|
|
|
|
|
|
def compare_stats_with_snapshot(self, stats_diff, stats_snapshot):
    """Assert appropriate difference between current stats and snapshot.

    :param stats_diff: dict of dicts - per sw_if_index (or the special
        key "err" for error counters) mapping of counter paths to the
        expected change since the snapshot
    :param stats_snapshot: snapshot previously taken by snapshot_stats
    """
    for sw_if_index, counters in stats_diff.items():
        for cntr, diff in counters.items():
            if sw_if_index == "err":
                # error counters are summed over all threads
                self.assert_equal(
                    self.statistics[cntr].sum(),
                    stats_snapshot[cntr].sum() + diff,
                    f"'{cntr}' counter value (previous value: "
                    f"{stats_snapshot[cntr].sum()}, "
                    f"expected diff: {diff})",
                )
                continue
            try:
                self.assert_equal(
                    self.statistics[cntr][:, sw_if_index].sum(),
                    stats_snapshot[cntr][:, sw_if_index].sum() + diff,
                    f"'{cntr}' counter value (previous value: "
                    f"{stats_snapshot[cntr][:, sw_if_index].sum()}, "
                    f"expected diff: {diff})",
                )
            except IndexError as e:
                # if diff is 0, then this most probably a case where
                # test declares multiple interfaces but traffic hasn't
                # passed through this one yet - which means the counter
                # value is 0 and can be ignored
                if 0 != diff:
                    raise Exception(
                        f"Couldn't sum counter: {cntr} on sw_if_index: {sw_if_index}"
                    ) from e
|
tests: add enhanced packet counter verification
Add support for inline packet counter verification to send_and_* functions.
Diff dictionary is a dictionary of dictionaries of interesting stats:
diff_dictionary =
{
"err" : { '/error/counter1' : 4, },
sw_if_index1 : { '/stat/segment/counter1' : 5,
'/stat/segment/counter2' : 6,
},
sw_if_index2 : { '/stat/segment/counter1' : 7,
},
}
It describes a per sw-if-index diffset, where each key is stat segment
path and value is the expected change for that counter for sw-if-index.
Special case string "err" is used for error counters.
This then allows more precise packet counter verification by first
defining a "zero" dictionary, e.g. for ED NAT:
cls.no_diff = StatsDiff({
pg.sw_if_index: {
'/nat44-ed/in2out/fastpath/tcp': 0,
'/nat44-ed/in2out/fastpath/udp': 0,
'/nat44-ed/in2out/fastpath/icmp': 0,
'/nat44-ed/in2out/fastpath/drops': 0,
'/nat44-ed/in2out/slowpath/tcp': 0,
'/nat44-ed/in2out/slowpath/udp': 0,
'/nat44-ed/in2out/slowpath/icmp': 0,
'/nat44-ed/in2out/slowpath/drops': 0,
'/nat44-ed/in2out/fastpath/tcp': 0,
'/nat44-ed/in2out/fastpath/udp': 0,
'/nat44-ed/in2out/fastpath/icmp': 0,
'/nat44-ed/in2out/fastpath/drops': 0,
'/nat44-ed/in2out/slowpath/tcp': 0,
'/nat44-ed/in2out/slowpath/udp': 0,
'/nat44-ed/in2out/slowpath/icmp': 0,
'/nat44-ed/in2out/slowpath/drops': 0,
}
for pg in cls.pg_interfaces
})
and then to specify only changed counters directly when calling
one of send_and_* functions:
self.send_and_assert_no_replies(
self.pg0, pkts, msg="i2o pkts",
stats_diff=self.no_diff | {
"err": {
'/err/nat44-ed-in2out-slowpath/out of ports': len(pkts),
},
self.pg0.sw_if_index: {
'/nat44-ed/in2out/slowpath/drops': len(pkts),
},
}
)
operator | is overloaded by StatsDiff class to perform a deep merge operation,
so in above case, dictionaries for "err" and self.pg0.sw_if_index do not
overwrite whole sub-dictionaries, rather the contents are merged,
assuring that all the remaining counters are verified to be zero.
Type: improvement
Signed-off-by: Klement Sekera <klement.sekera@gmail.com>
Change-Id: I2b87f7bd58a7d4b34ee72344e2f871b2f372e2d9
2022-02-18 10:34:35 +00:00
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def send_and_assert_no_replies(
    self, intf, pkts, remark="", timeout=None, stats_diff=None, trace=True, msg=None
):
    """Send packets and assert that no interface captures any reply.

    :param intf: interface to send from
    :param pkts: packets to send
    :param remark: text added to the assertion message
    :param timeout: how long to wait for (absence of) replies on the
        first interface; remaining interfaces are checked with 0.1s
    :param stats_diff: optional dict of expected counter changes
        (see snapshot_stats / compare_stats_with_snapshot)
    :param trace: log the packet trace when done
    :param msg: optional text logged together with the trace
    """
    if stats_diff:
        stats_snapshot = self.snapshot_stats(stats_diff)

    self.pg_send(intf, pkts)

    try:
        timeout = timeout or 1
        for i in self.pg_interfaces:
            i.assert_nothing_captured(timeout=timeout, remark=remark)
            # anything that would arrive does so within the first
            # window; remaining interfaces get a short timeout
            timeout = 0.1
    finally:
        if trace:
            if msg:
                self.logger.debug(f"send_and_assert_no_replies: {msg}")
            self.logger.debug(self.vapi.cli("show trace"))

    if stats_diff:
        self.compare_stats_with_snapshot(stats_diff, stats_snapshot)
|
2018-01-08 04:41:42 -08:00
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def send_and_expect(
    self,
    intf,
    pkts,
    output,
    n_rx=None,
    worker=None,
    trace=True,
    msg=None,
    stats_diff=None,
):
    """
    Send packets on one interface and expect them captured on another.

    :param intf: input interface to send the packets on
    :param pkts: packet or list of packets to send
    :param output: output interface on which the capture is expected
    :param n_rx: number of expected captured packets; defaults to the
        number of packets sent (1 when a single Packet is given)
    :param worker: pg worker to send the packets on (optional)
    :param trace: enable/disable packet capture tracing and logging
    :param msg: extra message logged before dumping the packet trace
    :param stats_diff: dictionary of expected stat counter changes,
        verified against a snapshot taken before sending
    :returns: the captured packets
    """
    if stats_diff:
        # snapshot the relevant counters before sending so the diff
        # can be verified afterwards
        stats_snapshot = self.snapshot_stats(stats_diff)

    # compare against None so an explicit n_rx=0 is honoured instead of
    # being silently replaced by the number of packets sent
    if n_rx is None:
        n_rx = 1 if isinstance(pkts, Packet) else len(pkts)
    self.pg_send(intf, pkts, worker=worker, trace=trace)
    rx = output.get_capture(n_rx)
    if trace:
        if msg:
            self.logger.debug(f"send_and_expect: {msg}")
        self.logger.debug(self.vapi.cli("show trace"))

    if stats_diff:
        self.compare_stats_with_snapshot(stats_diff, stats_snapshot)

    return rx
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def send_and_expect_load_balancing(
    self, input, pkts, outputs, worker=None, trace=True
):
    """
    Send packets and verify that traffic was load-balanced, i.e. that
    every interface in outputs captured at least one packet.

    :param input: interface to send the packets on
    :param pkts: packets to send
    :param outputs: interfaces which must each capture some traffic
    :param worker: pg worker to send the packets on (optional)
    :param trace: enable/disable packet trace logging
    :returns: list of captures, one per output interface
    """
    self.pg_send(input, pkts, worker=worker, trace=trace)
    captures = []
    for out_intf in outputs:
        capture = out_intf._get_capture(1)
        # every output interface must have seen at least one packet
        self.assertNotEqual(0, len(capture))
        captures.append(capture)
    if trace:
        self.logger.debug(self.vapi.cli("show trace"))
    return captures
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def send_and_expect_some(self, intf, pkts, output, worker=None, trace=True):
    """
    Send packets and expect that only a proper subset of them is
    captured on the output interface (more than zero, fewer than sent).

    :param intf: interface to send the packets on
    :param pkts: packets to send
    :param output: interface on which a partial capture is expected
    :param worker: pg worker to send the packets on (optional)
    :param trace: enable/disable packet trace logging
    :returns: the captured packets
    """
    self.pg_send(intf, pkts, worker=worker, trace=trace)
    captured = output._get_capture(1)
    if trace:
        self.logger.debug(self.vapi.cli("show trace"))
    # some, but not all, of the sent packets must have arrived
    self.assertTrue(len(captured) > 0)
    self.assertTrue(len(captured) < len(pkts))
    return captured
|
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
def send_and_expect_only(self, intf, pkts, output, timeout=None, stats_diff=None):
    """
    Send packets and expect them captured ONLY on the given output
    interface; every other pg interface must capture nothing.

    :param intf: interface to send the packets on
    :param pkts: packets to send
    :param output: the only interface allowed to capture the packets
    :param timeout: how long to wait for stray packets on the first
        checked interface (defaults to 1 second)
    :param stats_diff: dictionary of expected stat counter changes,
        verified against a snapshot taken before sending
    :returns: the captured packets
    """
    if stats_diff:
        stats_snapshot = self.snapshot_stats(stats_diff)

    self.pg_send(intf, pkts)
    rx = output.get_capture(len(pkts))
    allowed = [output]
    if not timeout:
        timeout = 1
    for pg_if in self.pg_interfaces:
        if pg_if in allowed:
            continue
        pg_if.assert_nothing_captured(timeout=timeout)
        # only the first checked interface waits the full timeout;
        # subsequent ones are polled briefly as time has already passed
        timeout = 0.1

    if stats_diff:
        self.compare_stats_with_snapshot(stats_diff, stats_snapshot)

    return rx
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
|
2018-07-16 14:22:01 +02:00
|
|
|
def get_testcase_doc_name(test):
    """Return the first line of the docstring of the test's class."""
    doc = getdoc(type(test))
    first_line, *_ = doc.splitlines()
    return first_line
|
|
|
|
|
|
|
|
|
2018-11-22 10:01:09 +00:00
|
|
|
def get_test_description(descriptions, test):
    """
    Return a human-readable description for a test.

    Prefer the test's short description when descriptions are enabled
    and one is available; otherwise fall back to str(test).
    """
    short = test.shortDescription()
    return short if descriptions and short else str(test)
|
|
|
|
|
|
|
|
|
2018-09-21 13:55:16 +02:00
|
|
|
class TestCaseInfo(object):
    """Per-test-case bookkeeping used by the test result machinery.

    Holds the logger, temporary directory, VPP pid and VPP binary path
    for one test case, plus the name of the test during which a core
    file appeared (if any).
    """

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        self.logger = logger
        self.tempdir = tempdir
        self.vpp_pid = vpp_pid
        self.vpp_bin_path = vpp_bin_path
        # set to the test's name when a core file is found in tempdir
        self.core_crash_test = None
|
2017-03-07 11:39:27 +01:00
|
|
|
|
|
|
|
|
2016-10-03 19:44:57 +02:00
|
|
|
class VppTestResult(unittest.TestResult):
    """
    @property result_string
     String variable to store the test case result string.
    @property errors
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test which
     raised an unexpected exception.
    @property failures
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test where
     a failure was explicitly signalled using the TestCase.assert*()
     methods.
    """

    # class-level state shared across instances: info objects of test
    # cases that failed and of those that produced a core file
    failed_test_cases_info = set()
    core_crash_test_cases_info = set()
    # TestCaseInfo of the currently running test case, or None
    current_test_case_info = None

    def __init__(self, stream=None, descriptions=None, verbosity=None, runner=None):
        """
        :param stream File descriptor to store where to report test results.
            Set to the standard error stream by default.
        :param descriptions Boolean variable to store information if to use
            test case descriptions.
        :param verbosity Integer variable to store required verbosity level.
        """
        super(VppTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.result_code = TestResultCode.TEST_RUN
        self.result_string = None
        self.runner = runner
        # classes whose header was already printed by print_header()
        self.printed = []

    def addSuccess(self, test):
        """
        Record a test succeeded result

        :param test:

        """
        self.log_result("addSuccess", test)
        unittest.TestResult.addSuccess(self, test)
        self.result_string = colorize("OK", GREEN)
        self.result_code = TestResultCode.PASS
        self.send_result_through_pipe(test, self.result_code)

    def addExpectedFailure(self, test, err):
        """Record a test that failed as expected (green FAIL)."""
        self.log_result("addExpectedFailure", test, err)
        super().addExpectedFailure(test, err)
        self.result_string = colorize("FAIL", GREEN)
        self.result_code = TestResultCode.EXPECTED_FAIL
        self.send_result_through_pipe(test, self.result_code)

    def addUnexpectedSuccess(self, test):
        """Record a test that passed although it was expected to fail (red OK)."""
        self.log_result("addUnexpectedSuccess", test)
        super().addUnexpectedSuccess(test)
        self.result_string = colorize("OK", RED)
        self.result_code = TestResultCode.UNEXPECTED_PASS
        self.send_result_through_pipe(test, self.result_code)

    def addSkip(self, test, reason):
        """
        Record a test skipped.

        :param test:
        :param reason:

        """
        self.log_result("addSkip", test, reason=reason)
        unittest.TestResult.addSkip(self, test, reason)
        self.result_string = colorize("SKIP", YELLOW)

        # cpu-shortage skips are tracked with a dedicated result code
        if reason == "not enough cpus":
            self.result_code = TestResultCode.SKIP_CPU_SHORTAGE
        else:
            self.result_code = TestResultCode.SKIP
        self.send_result_through_pipe(test, self.result_code)

    def symlink_failed(self):
        """Create a '<tempdir>-FAILED' symlink in config.failed_dir pointing
        at the failed test case's temporary directory (best-effort)."""
        if self.current_test_case_info:
            try:
                failed_dir = config.failed_dir
                link_path = os.path.join(
                    failed_dir,
                    "%s-FAILED" % os.path.basename(self.current_test_case_info.tempdir),
                )

                self.current_test_case_info.logger.debug(
                    "creating a link to the failed test"
                )
                self.current_test_case_info.logger.debug(
                    "os.symlink(%s, %s)"
                    % (self.current_test_case_info.tempdir, link_path)
                )
                if os.path.exists(link_path):
                    self.current_test_case_info.logger.debug("symlink already exists")
                else:
                    os.symlink(self.current_test_case_info.tempdir, link_path)

            except Exception as e:
                # symlink creation is best-effort; log and carry on
                self.current_test_case_info.logger.error(e)

    def send_result_through_pipe(self, test, result):
        """Forward (test id, result code) to the framework result pipe, if set."""
        if hasattr(self, "test_framework_result_pipe"):
            pipe = self.test_framework_result_pipe
            if pipe:
                pipe.send((test.id(), result))

    def log_result(self, fn, test, err=None, reason=None):
        """Log which result callback fired for which test, including the
        error tuple and/or skip reason when provided."""
        if self.current_test_case_info:
            # _ErrorHolder instances carry a description instead of the
            # usual test method attributes
            if isinstance(test, unittest.suite._ErrorHolder):
                test_name = test.description
            else:
                test_name = "%s.%s(%s)" % (
                    test.__class__.__name__,
                    test._testMethodName,
                    test._testMethodDoc,
                )
            extra_msg = ""
            if err:
                extra_msg += f", error is {err}"
            if reason:
                extra_msg += f", reason is {reason}"
            self.current_test_case_info.logger.debug(
                f"--- {fn}() {test_name} called{extra_msg}"
            )
            if err:
                self.current_test_case_info.logger.debug(
                    "formatted exception is:\n%s" % "".join(format_exception(*err))
                )

    def add_error(self, test, err, unittest_fn, result_code):
        """Common path for addFailure/addError: record the result, create
        the FAILED symlink and check the temp dir for a core file."""
        self.result_code = result_code
        if result_code == TestResultCode.FAIL:
            self.log_result("addFailure", test, err=err)
            error_type_str = colorize("FAIL", RED)
        elif result_code == TestResultCode.ERROR:
            self.log_result("addError", test, err=err)
            error_type_str = colorize("ERROR", RED)
        else:
            raise Exception(f"Unexpected result code {result_code}")

        unittest_fn(self, test, err)
        if self.current_test_case_info:
            self.result_string = "%s [ temp dir used by test case: %s ]" % (
                error_type_str,
                self.current_test_case_info.tempdir,
            )
            self.symlink_failed()
            self.failed_test_cases_info.add(self.current_test_case_info)
            if is_core_present(self.current_test_case_info.tempdir):
                # remember only the first test during which the core appeared
                if not self.current_test_case_info.core_crash_test:
                    if isinstance(test, unittest.suite._ErrorHolder):
                        test_name = str(test)
                    else:
                        test_name = "'{!s}' ({!s})".format(
                            get_testcase_doc_name(test), test.id()
                        )
                    self.current_test_case_info.core_crash_test = test_name
                self.core_crash_test_cases_info.add(self.current_test_case_info)
        else:
            self.result_string = "%s [no temp dir]" % error_type_str

        self.send_result_through_pipe(test, result_code)

    def addFailure(self, test, err):
        """
        Record a test failed result

        :param test:
        :param err: error message

        """
        self.add_error(test, err, unittest.TestResult.addFailure, TestResultCode.FAIL)

    def addError(self, test, err):
        """
        Record a test error result

        :param test:
        :param err: error message

        """
        self.add_error(test, err, unittest.TestResult.addError, TestResultCode.ERROR)

    def getDescription(self, test):
        """
        Get test description

        :param test:
        :returns: test description

        """
        return get_test_description(self.descriptions, test)

    def startTest(self, test):
        """
        Start a test

        :param test:

        """

        def print_header(test):
            # print the banner only once per test case class
            if test.__class__ in self.printed:
                return

            test_doc = getdoc(test)
            if not test_doc:
                raise Exception("No doc string for test '%s'" % test.id())

            test_title = test_doc.splitlines()[0].rstrip()
            test_title = colorize(test_title, GREEN)
            if test.is_tagged_run_solo():
                test_title = colorize(f"SOLO RUN: {test_title}", YELLOW)

            # This block may overwrite the colorized title above,
            # but we want this to stand out and be fixed
            if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
                test_title = colorize(f"FIXME with VPP workers: {test_title}", RED)

            if test.has_tag(TestCaseTag.FIXME_ASAN):
                test_title = colorize(f"FIXME with ASAN: {test_title}", RED)
                test.skip_fixme_asan()

            if is_distro_ubuntu2204 == True and test.has_tag(
                TestCaseTag.FIXME_UBUNTU2204
            ):
                test_title = colorize(f"FIXME on Ubuntu-22.04: {test_title}", RED)
                test.skip_fixme_ubuntu2204()

            if is_distro_debian11 == True and test.has_tag(TestCaseTag.FIXME_DEBIAN11):
                test_title = colorize(f"FIXME on Debian-11: {test_title}", RED)
                test.skip_fixme_debian11()

            if "debug" in config.vpp_tag and test.has_tag(TestCaseTag.FIXME_VPP_DEBUG):
                test_title = colorize(f"FIXME on VPP Debug: {test_title}", RED)
                test.skip_fixme_vpp_debug()

            # annotate the title with the worker-thread configuration
            if hasattr(test, "vpp_worker_count"):
                if test.vpp_worker_count == 0:
                    test_title += " [main thread only]"
                elif test.vpp_worker_count == 1:
                    test_title += " [1 worker thread]"
                else:
                    test_title += f" [{test.vpp_worker_count} worker threads]"

            if test.__class__.skipped_due_to_cpu_lack:
                test_title = colorize(
                    f"{test_title} [skipped - not enough cpus, "
                    f"required={test.__class__.get_cpus_required()}, "
                    f"available={max_vpp_cpus}]",
                    YELLOW,
                )

            print(double_line_delim)
            print(test_title)
            print(double_line_delim)
            self.printed.append(test.__class__)

        print_header(test)
        # remember the start time so stopTest() can report the duration
        self.start_test = time.time()
        unittest.TestResult.startTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln("Starting " + self.getDescription(test) + " ...")
            self.stream.writeln(single_line_delim)

    def stopTest(self, test):
        """
        Called when the given test has been run

        :param test:

        """
        unittest.TestResult.stopTest(self, test)

        # suffix appended to the result line; only expected-fail and
        # unexpected-pass results get an explicit marker
        result_code_to_suffix = {
            TestResultCode.PASS: "",
            TestResultCode.FAIL: "",
            TestResultCode.ERROR: "",
            TestResultCode.SKIP: "",
            TestResultCode.TEST_RUN: "",
            TestResultCode.SKIP_CPU_SHORTAGE: "",
            TestResultCode.EXPECTED_FAIL: " [EXPECTED FAIL]",
            TestResultCode.UNEXPECTED_PASS: " [UNEXPECTED PASS]",
        }

        if self.verbosity > 0:
            self.stream.writeln(single_line_delim)
            self.stream.writeln(
                "%-72s%s%s"
                % (
                    self.getDescription(test),
                    self.result_string,
                    result_code_to_suffix[self.result_code],
                )
            )
            self.stream.writeln(single_line_delim)
        else:
            # terse mode additionally reports the test duration
            self.stream.writeln(
                "%-67s %4.2f %s%s"
                % (
                    self.getDescription(test),
                    time.time() - self.start_test,
                    self.result_string,
                    result_code_to_suffix[self.result_code],
                )
            )

        self.send_result_through_pipe(test, TestResultCode.TEST_RUN)

    def printErrors(self):
        """
        Print errors from running the test case
        """
        if len(self.errors) > 0 or len(self.failures) > 0:
            self.stream.writeln()
            self.printErrorList("ERROR", self.errors)
            self.printErrorList("FAIL", self.failures)

        # ^^ that is the last output from unittest before summary
        if not self.runner.print_summary:
            # silence the summary by redirecting both streams to devnull;
            # VppTestRunner.run() restores them afterwards
            devnull = unittest.runner._WritelnDecorator(open(os.devnull, "w"))
            self.stream = devnull
            self.runner.stream = devnull

    def printErrorList(self, flavour, errors):
        """
        Print error list to the output stream together with error type
        and test case description.

        :param flavour: error type
        :param errors: iterable errors

        """
        for test, err in errors:
            self.stream.writeln(double_line_delim)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%s" % err)
|
|
|
|
|
|
|
|
|
|
|
|
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard error.
    """

    @property
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult

    def __init__(
        self,
        keep_alive_pipe=None,
        descriptions=True,
        verbosity=1,
        result_pipe=None,
        failfast=False,
        buffer=False,
        resultclass=None,
        print_summary=True,
        **kwargs,
    ):
        """Create the runner, forcing its output stream to stdout.

        The stream argument of TextTestRunner is deliberately not exposed:
        stdout is hard-coded so runner output stays in sync with prints
        coming from VppTestCase methods.
        """
        super().__init__(
            sys.stdout, descriptions, verbosity, failfast, buffer, resultclass, **kwargs
        )
        # wire up the pipes used by the surrounding test framework
        KeepAliveReporter.pipe = keep_alive_pipe

        self.orig_stream = self.stream
        self.resultclass.test_framework_result_pipe = result_pipe

        self.print_summary = print_summary

    def _makeResult(self):
        # pass ourselves in so the result object can consult print_summary
        return self.resultclass(self.stream, self.descriptions, self.verbosity, self)

    def run(self, test):
        """
        Run the tests

        :param test:

        """
        # emit stack trace to stderr if killed by signal
        faulthandler.enable()

        outcome = super().run(test)
        if not self.print_summary:
            # restore the real stream that printErrors() swapped for devnull
            self.stream = self.orig_stream
            outcome.stream = self.orig_stream
        return outcome
|
2017-10-16 04:20:13 -07:00
|
|
|
|
|
|
|
|
|
|
|
class Worker(Thread):
    """Run an external executable on a background thread, capturing its
    stdout/stderr into the supplied logger and its exit code into
    ``self.result``.
    """

    def __init__(self, executable_args, logger, env=None, *args, **kwargs):
        """
        :param executable_args: command line as a list; first element is the
            path to the binary to execute
        :param logger: logger that receives the child's output and status
        :param env: extra environment variables for the child process
        Remaining positional/keyword arguments are forwarded to Thread.
        """
        super(Worker, self).__init__(*args, **kwargs)
        self.logger = logger
        self.args = executable_args
        # Subclasses may set .testcase to opt into interactive debugging.
        if hasattr(self, "testcase") and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                # BUG FIX: the original code appended the leftover *args
                # varargs tuple ("+ args") here, which both raised
                # TypeError (list + tuple) and discarded the real command
                # line; the gdbserver prefix must wrap executable_args.
                self.args = [
                    "/usr/bin/gdbserver",
                    "localhost:{port}".format(port=self.testcase.gdbserver_port),
                ] + executable_args
            elif self.testcase.debug_gdb and hasattr(self, "wait_for_gdb"):
                self.args.append(self.wait_for_gdb)
        self.app_bin = executable_args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, "role"):
            self.app_name += " {role}".format(role=self.role)
        self.process = None
        self.result = None
        env = {} if env is None else env
        # Deep-copy so later mutation by the caller cannot leak into the child.
        self.env = copy.deepcopy(env)

    def wait_for_enter(self):
        """When debugging is enabled, print attach instructions and block
        until the user presses ENTER; otherwise return immediately."""
        if not hasattr(self, "testcase"):
            return
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print()
            print(double_line_delim)
            print(
                "Spawned GDB Server for '{app}' with PID: {pid}".format(
                    app=self.app_name, pid=self.process.pid
                )
            )
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print()
            print(double_line_delim)
            print(
                "Spawned '{app}' with PID: {pid}".format(
                    app=self.app_name, pid=self.process.pid
                )
            )
        else:
            return
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print(
                "sudo gdb "
                + self.app_bin
                + " -ex 'target remote localhost:{port}'".format(
                    port=self.testcase.gdbserver_port
                )
            )
            print(
                "Now is the time to attach gdb by running the above "
                "command, set up breakpoints etc., then resume from "
                "within gdb by issuing the 'continue' command"
            )
            # Next spawned gdbserver gets a fresh port.
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print(
                "sudo gdb "
                + self.app_bin
                + " -ex 'attach {pid}'".format(pid=self.process.pid)
            )
            print(
                "Now is the time to attach gdb by running the above "
                "command and set up breakpoints etc., then resume from"
                " within gdb by issuing the 'continue' command"
            )
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    def run(self):
        """Thread body: spawn the executable, wait for it to finish, log its
        output and store its return code in ``self.result``."""
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
            executable, os.F_OK | os.X_OK
        ):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable
            )
        self.logger.debug(
            "Running executable '{app}': '{cmd}'".format(
                app=self.app_name, cmd=" ".join(self.args)
            )
        )
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        # stdbuf disables stdio buffering so output arrives promptly;
        # os.setpgrp detaches the child from our process group.
        self.process = subprocess.Popen(
            ["stdbuf", "-o0", "-e0"] + self.args,
            shell=False,
            env=env,
            preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info(
            "Executable `{app}' wrote to stdout:".format(app=self.app_name)
        )
        self.logger.info(single_line_delim)
        self.logger.info(out.decode("utf-8"))
        self.logger.info(single_line_delim)
        self.logger.info(
            "Executable `{app}' wrote to stderr:".format(app=self.app_name)
        )
        self.logger.info(single_line_delim)
        self.logger.info(err.decode("utf-8"))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
|
2019-01-13 16:09:10 -08:00
|
|
|
|
2019-05-16 14:34:55 +02:00
|
|
|
|
2022-04-26 19:02:15 +02:00
|
|
|
if __name__ == "__main__":
    # This module is a test-framework library; it has no standalone
    # entry point, so direct invocation is deliberately a no-op.
    pass
|