tests: replace pycodestyle with black
Drop pycodestyle for code style checking in favor of black. Black is a much faster, stable, PEP8-compliant code style checker that also offers automatic formatting. It aims to be deterministic and to produce the smallest possible diffs, and it is used by many projects, small and large. Running checkstyle with black takes a few seconds and yields terse output, so test-checkstyle-diff is no longer necessary.

The scope of checkstyle is expanded to all Python files in the repo, replacing test-checkstyle with checkstyle-python. A fixstyle-python target is now also available for automatic style formatting.

Note: the Python virtualenv has been consolidated in test/Makefile and test/requirements*.txt, which will eventually be moved to a central location. This is required to simplify the automated generation of docker executor images in the CI.

Type: improvement
Change-Id: I022a326603485f58585e879ac0f697fceefbc9c8
Signed-off-by: Klement Sekera <klement.sekera@gmail.com>
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
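For context, a minimal sketch of the kind of mechanical rewrite black applies throughout this commit (double quotes, one element per line with a trailing comma once a call no longer fits on one line); the pattern variable is hypothetical:

    import re

    # Before (hand-wrapped to satisfy pycodestyle):
    pattern = (re.compile(r"^\s*define\s+(?P<name>\S+)"),
               r"typedef struct vl_api_\g<name>_t")

    # After black: exploded tuple, double quotes, magic trailing comma.
    pattern = (
        re.compile(r"^\s*define\s+(?P<name>\S+)"),
        r"typedef struct vl_api_\g<name>_t",
    )

In day-to-day use, `make checkstyle-python` reports violations and `make fixstyle-python` rewrites the files in place.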
This commit is contained in:
parent f90348bcb4
commit d9b0c6fbf7
Makefile (28 lines changed)

@@ -232,10 +232,10 @@ help:
@echo " compdb - (re)generate compile_commands.json"
@echo " checkstyle - check coding style"
@echo " checkstyle-commit - check commit message format"
@echo " checkstyle-test - check test framework coding style"
@echo " checkstyle-test-diff - check test framework coding style (only changed files)"
@echo " checkstyle-python - check python coding style using 'black' formatter"
@echo " checkstyle-api - check api for incompatible changes"
@echo " fixstyle - fix coding style"
@echo " fixstyle-python - fix python coding style using 'black' formatter"
@echo " doxygen - DEPRECATED - use 'make docs'"
@echo " bootstrap-doxygen - DEPRECATED"
@echo " wipe-doxygen - DEPRECATED"
@@ -501,13 +501,17 @@ test-wipe-cov:
test-wipe-all:
@make -C test wipe-all

# Note: All python venv consolidated in test/Makefile, test/requirements*.txt
.PHONY: test-checkstyle
test-checkstyle:
@make -C test checkstyle
$(warning "test-checkstyle is deprecated. Running checkstyle-python.")
@make -C test checkstyle-python-all

# Note: All python venv consolidated in test/Makefile, test/requirements*.txt
.PHONY: test-checkstyle-diff
test-checkstyle-diff:
@make -C test checkstyle-diff
$(warning "test-checkstyle-diff is deprecated. Running checkstyle-python.")
@make -C test checkstyle-python-all

.PHONY: test-refresh-deps
test-refresh-deps:
@@ -664,15 +668,27 @@ checkstyle-commit:
@extras/scripts/check_commit_msg.sh

.PHONY: checkstyle-test
checkstyle-test: test-checkstyle
checkstyle-test:
$(warning "test-checkstyle is deprecated. Running checkstyle-python.")
@make -C test checkstyle-python-all

# Note: All python venv consolidated in test/Makefile, test/requirements*.txt
.PHONY: checkstyle-python
checkstyle-python:
@make -C test checkstyle-python-all

.PHONY: checkstyle-all
checkstyle-all: checkstyle-commit checkstyle checkstyle-test
checkstyle-all: checkstyle-commit checkstyle checkstyle-python

.PHONY: fixstyle
fixstyle:
@extras/scripts/checkstyle.sh --fix

# Note: All python venv consolidated in test/Makefile, test/requirements*.txt
.PHONY: fixstyle-python
fixstyle-python:
@make -C test fixstyle-python-all

.PHONY: checkstyle-api
checkstyle-api:
@extras/scripts/crcchecker.py --check-patchset
@@ -24,9 +24,10 @@ if len(sys.argv) < 2:

patterns = [
# Search for "define" blocks and treat them as structs
(re.compile(r"^.*(manual_.[^\s]+\s+)?define\s+(?P<name>[^\s]+)"),
r"typedef struct vl_api_\g<name>_t"),

(
re.compile(r"^.*(manual_.[^\s]+\s+)?define\s+(?P<name>[^\s]+)"),
r"typedef struct vl_api_\g<name>_t",
),
# For every "brief" statement at the start of a comment block, add an
# xref with whatever is on the same line. This gives us an index page
# with all the API methods in one place.
@@ -36,14 +37,13 @@ patterns = [
# r'/** @xrefitem api "" "VPP API" \g<c> \g<b> \g<c>'), # capture inline comment close
# (re.compile(r"/\*\*\s*(?P<b>[\\@]brief)\s+(?P<c>.+)$"),
# r'/** @xrefitem api "" "VPP API" \g<c> \g<b> \g<c>'),

# Since structs don't have params, replace @param with @tparam
( re.compile("[\\@]param\\b"), "@tparam"),
(re.compile("[\\@]param\\b"), "@tparam"),
]

with open(sys.argv[1]) as fd:
for line in fd:
str = line[:-1] # strip \n
str = line[:-1]  # strip \n
for p in patterns:
str = p[0].sub(p[1], str)
sys.stdout.write(str+"\n")
sys.stdout.write(str + "\n")
@@ -23,51 +23,82 @@ if len(sys.argv) < 2:

replace_patterns = [
# Search for VLIB_CLI_COMMAND, extract its parameters and add a docblock for it
( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>"),
( re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_cli_command_t \g<name>"),

(
re.compile("(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_cli_command_t \g<name>",
),
(
re.compile(
"(?P<m>VLIB_CLI_COMMAND)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"
),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_cli_command_t \g<name>",
),
# Search for VLIB_REGISTER_NODE, extract its parameters and add a docblock for it
( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>"),
( re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_node_registration_t \g<name>"),

(
re.compile("(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ vlib_node_registration_t \g<name>",
),
(
re.compile(
"(?P<m>VLIB_REGISTER_NODE)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<qual>[^)]*)[)]"
),
r"/** @brief (@em constructor) \g<m> (\g<name>) */ \g<qual> vlib_node_registration_t \g<name>",
),
# Search for VLIB_INIT_FUNCTION, extract its parameter and add a docblock for it
( re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),
( re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>"),

(
re.compile("(?P<m>VLIB_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>",
),
(
re.compile("(?P<m>VLIB_DECLARE_INIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)[)]"),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ vlib_init_function_t * _vlib_init_function_\g<name>",
),
# Search for VLIB_LOOP_ENTER_FUNCTION, extract the parameters and add a docblock for it
( re.compile("(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>"),
( re.compile("(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>"),

(
re.compile(
"(?P<m>VLIB_MAIN_LOOP_ENTER_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"
),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_enter_\g<name>",
),
(
re.compile(
"(?P<m>VLIB_MAIN_LOOP_EXIT_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"
),
r"/** @brief (@em constructor) \g<m> (@ref \g<name>) */ _vlib_main_loop_exit_\g<name>",
),
# Search for VLIB_CONFIG_FUNCTION, extract the parameters and add a docblock for it
( re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),
( re.compile("(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>\"[^\"]+\")(,[^)]*)?[)]"),
r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>"),

(
re.compile(
'(?P<m>VLIB_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>"[^"]+")(,[^)]*)?[)]'
),
r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>",
),
(
re.compile(
'(?P<m>VLIB_EARLY_CONFIG_FUNCTION)\s*[(](?P<name>[a-zA-Z0-9_]+),\s*(?P<n>"[^"]+")(,[^)]*)?[)]'
),
r"/** @brief (@em constructor) \g<m> (\g<name>, \g<n>) */ vlib_config_function_runtime_t _vlib_config_function_\g<name>",
),
# Search for "format_thing" and "unformat_thing" when used as a function pointer and add parens
( re.compile("(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))"),
r"\g<pre>\g<name>()\g<post>" ),

(
re.compile(
"(?P<pre>(^|,)\s*)(?P<name>(un)?format_[a-zA-Z0-9_]+)(?P<post>\s*(,|$))"
),
r"\g<pre>\g<name>()\g<post>",
),
# Search for CLIB_PAD_FROM_TO(...); and replace with padding
# #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
( re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]" ),

(
re.compile("(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),\s*(?P<to>[^)]+)[)]"),
r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]",
),
]

filename = sys.argv[1]
cwd = os.getcwd()
if filename[0:len(cwd)] == cwd:
filename = filename[len(cwd):]
if filename[0 : len(cwd)] == cwd:
filename = filename[len(cwd) :]
if filename[0] == "/":
filename = filename[1:]

@@ -76,12 +107,12 @@ with open(filename) as fd:

for line in fd:
line_num += 1
str = line[:-1] # filter \n
str = line[:-1]  # filter \n

# Look for search/replace patterns
for p in replace_patterns:
str = p[0].sub(p[1], str)

sys.stdout.write(str+"\n")
sys.stdout.write(str + "\n")

# All done
@@ -26,17 +26,19 @@ if len(sys.argv) < 2:
replace_patterns = [
# Search for CLIB_PAD_FROM_TO(...); and replace with padding
# #define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
(re.compile(r"(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+),"
r"\s*(?P<to>[^)]+)[)]"),
r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]"),

(
re.compile(
r"(?P<m>CLIB_PAD_FROM_TO)\s*[(](?P<from>[^,]+)," r"\s*(?P<to>[^)]+)[)]"
),
r"/** Padding. */ u8 pad_\g<from>[(\g<to>) - (\g<from>)]",
),
]

filename = sys.argv[1]
cwd = os.getcwd()
if filename[0:len(cwd)] == cwd:
filename = filename[len(cwd):]
if filename[0 : len(cwd)] == cwd:
filename = filename[len(cwd) :]
if filename[0] == "/":
filename = filename[1:]

@@ -51,6 +53,6 @@ with open(filename) as fd:
for p in replace_patterns:
str = p[0].sub(p[1], str)

sys.stdout.write(str+"\n")
sys.stdout.write(str + "\n")

# All done
@@ -33,15 +33,14 @@ class ContentRenderer:

class PluginRenderer(ContentRenderer):

def _render_entry(self, output_file, entry):
description = "<no-description-found>"
# we use glob because a plugin can (ioam for now)
# define the plugin definition in
# a further subdirectory.
path = os.path.join(self.plugin_dir(), entry.name, '**')
path = os.path.join(self.plugin_dir(), entry.name, "**")
for f in glob.iglob(path, recursive=True):
if not f.endswith('.c'):
if not f.endswith(".c"):
continue
with open(f, "r", encoding="utf-8") as src:
for match in self.regex.finditer(src.read()):
@@ -56,7 +55,7 @@ class PluginRenderer(ContentRenderer):
with open(fname, "w") as output_file:
with os.scandir(self.plugin_dir()) as pdir:
for entry in sorted(pdir, key=lambda entry: entry.name):
if not entry.name.startswith('.') and entry.is_dir():
if not entry.name.startswith(".") and entry.is_dir():
self._render_entry(output_file, entry)
@@ -24,8 +24,10 @@ import re
themselves on this list."""
siphon_patterns = []

class Generate(object):
"""Matches a siphon comment block start"""

siphon_block_start = re.compile("^\s*/\*\?\s*(.*)$")

"""Matches a siphon comment block stop"""
@@ -36,8 +38,10 @@ class Generate(object):

"""Matches a siphon block directive such as
'%clicmd:group_label Debug CLI%'"""
siphon_block_directive = re.compile("(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % \
(siphon_block_delimiter, siphon_block_delimiter))
siphon_block_directive = re.compile(
"(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)"
% (siphon_block_delimiter, siphon_block_delimiter)
)
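As a quick standalone illustration of what this directive pattern matches (assuming the block delimiter is '%', as the docstring's example suggests; this snippet is not part of the diff):

    import re

    delim = "%"  # assumed value of siphon_block_delimiter
    directive = re.compile(r"(%s)\s*([a-zA-Z0-9_:]+)\s+(.*)\s*(%s)" % (delim, delim))
    m = directive.search("/*? %clicmd:group_label Debug CLI% ?*/")
    assert m is not None
    assert m.group(2) == "clicmd:group_label"   # directive key
    assert m.group(3).strip() == "Debug CLI"    # directive value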
"""Matches the start of an initializer block"""
siphon_initializer = re.compile("\s*=")
@@ -54,7 +58,6 @@ class Generate(object):
"""Logging handler"""
log = None

def __init__(self, output_directory, input_prefix):
super(Generate, self).__init__()
self.log = logging.getLogger("siphon.generate")
@@ -70,14 +73,13 @@ class Generate(object):
self.output = {}
for siphon in self.known_siphons:
self.output[siphon] = {
"file": "%s/%s.siphon" % (output_directory, siphon),
"global": {},
"items": [],
}
"file": "%s/%s.siphon" % (output_directory, siphon),
"global": {},
"items": [],
}

self.input_prefix = input_prefix

"""
count open and close braces in str
return (0, index) when braces were found and count becomes 0.
@@ -87,16 +89,17 @@ class Generate(object):
return (count, -1) if not all opening braces are closed, count is the
current depth
"""

def count_braces(self, str, count=0, found=False):
for index in range(0, len(str)):
if str[index] == '{':
count += 1;
if str[index] == "{":
count += 1
found = True
elif str[index] == '}':
elif str[index] == "}":
if count == 0:
# means we never found an open brace
return (-1, -1)
count -= 1;
count -= 1

if count == 0 and found:
return (count, index)
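To make the brace-tracking contract above concrete, here is a standalone sketch of the same algorithm with a few illustrative calls (written as a module-level function, unlike the method in the diff):

    def count_braces(s, count=0, found=False):
        # Returns (0, index) once an opened brace run closes,
        # (-1, -1) on a close brace with none open, and
        # (depth, -1) if braces remain open at end of string.
        for index in range(len(s)):
            if s[index] == "{":
                count += 1
                found = True
            elif s[index] == "}":
                if count == 0:
                    return (-1, -1)
                count -= 1
            if count == 0 and found:
                return (count, index)
        return (count, -1)

    assert count_braces("= { .path = 1 }") == (0, 14)  # balanced at index 14
    assert count_braces("= { {") == (2, -1)            # two braces still open
    assert count_braces("}") == (-1, -1)               # stray close brace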
@@ -106,8 +109,8 @@ class Generate(object):
def parse(self, filename):
# Strip the current directory off the start of the
# filename for brevity
if filename[0:len(self.input_prefix)] == self.input_prefix:
filename = filename[len(self.input_prefix):]
if filename[0 : len(self.input_prefix)] == self.input_prefix:
filename = filename[len(self.input_prefix) :]
if filename[0] == "/":
filename = filename[1:]

@@ -115,8 +118,8 @@ class Generate(object):
directory = os.path.dirname(filename)
if directory[0:2] == "./":
directory = directory[2:]
elif directory[0:len(self.input_prefix)] == self.input_prefix:
directory = directory[len(self.input_prefix):]
elif directory[0 : len(self.input_prefix)] == self.input_prefix:
directory = directory[len(self.input_prefix) :]
if directory[0] == "/":
directory = directory[1:]

@@ -133,9 +136,10 @@ class Generate(object):

for line in fd:
line_num += 1
str = line[:-1] # filter \n
str = line[:-1]  # filter \n

"""See if there is a block directive and if so extract it"""

def process_block_directive(str, directives):
m = self.siphon_block_directive.search(str)
if m is not None:
@@ -143,7 +147,7 @@ class Generate(object):
v = m.group(3).strip()
directives[k] = v
# Return only the parts we did not match
return str[0:m.start(1)] + str[m.end(4):]
return str[0 : m.start(1)] + str[m.end(4) :]

return str

@@ -200,27 +204,25 @@ class Generate(object):
# Skip to next line
continue

if siphon is None:
# Look for blocks we need to siphon
for p in siphon_patterns:
if p[0].match(str):
siphon = [ p[1], str + "\n", 0 ]
siphon = [p[1], str + "\n", 0]
siphon_line = line_num

# see if we have an initializer
m = self.siphon_initializer.search(str)
if m is not None:
# count the braces on this line
(count, index) = \
self.count_braces(str[m.start():])
(count, index) = self.count_braces(str[m.start() :])
siphon[2] = count
# TODO - it's possible we have the
# initializer all on the first line
# we should check for it, but also
# account for the possibility that
# the open brace is on the next line
#if count == 0:
# if count == 0:
#     # braces balanced
#     close_siphon = siphon
#     siphon = None
@@ -231,12 +233,11 @@ class Generate(object):
else:
# See if we should end the siphon here - do we have
# balanced braces?
(count, index) = self.count_braces(str,
count=siphon[2], found=True)
(count, index) = self.count_braces(str, count=siphon[2], found=True)
if count == 0:
# braces balanced - add the substring and
# close the siphon
siphon[1] += str[:index+1] + ";\n"
siphon[1] += str[: index + 1] + ";\n"
close_siphon = siphon
siphon = None
else:
@@ -259,15 +260,15 @@ class Generate(object):
details[key] = directives[key]

# Copy details for this block
details['file'] = filename
details['directory'] = directory
details['line_start'] = siphon_line
details['line_end'] = line_num
details['siphon_block'] = siphon_block.strip()
details["file"] = filename
details["directory"] = directory
details["line_start"] = siphon_line
details["line_end"] = line_num
details["siphon_block"] = siphon_block.strip()
details["block"] = close_siphon[1]

# Store the item
self.output[siphon_name]['items'].append(details)
self.output[siphon_name]["items"].append(details)

# All done
close_siphon = None
@@ -275,7 +276,7 @@ class Generate(object):

# Update globals
for key in directives.keys():
if ':' not in key:
if ":" not in key:
continue

if filename.endswith("/dir.dox"):
@@ -288,19 +289,17 @@ class Generate(object):

if sn not in self.output:
self.output[sn] = {}
if 'global' not in self.output[sn]:
self.output[sn]['global'] = {}
if l not in self.output[sn]['global']:
self.output[sn]['global'][l] = {}
if "global" not in self.output[sn]:
self.output[sn]["global"] = {}
if l not in self.output[sn]["global"]:
self.output[sn]["global"][l] = {}

self.output[sn]['global'][l][label] = directives[key]
self.output[sn]["global"][l][label] = directives[key]

def deliver(self):
# Write out the data
for siphon in self.output.keys():
self.log.info("Saving siphon data %s." % siphon)
s = self.output[siphon]
with open(s['file'], "a") as fp:
json.dump(s, fp,
separators=(',', ': '), indent=4, sort_keys=True)

with open(s["file"], "a") as fp:
json.dump(s, fp, separators=(",", ": "), indent=4, sort_keys=True)
@@ -17,8 +17,11 @@ import re
from . import generate

# Register our regexp
generate.siphon_patterns.append((
re.compile("(?P<m>VLIB_CLI_COMMAND)\s*"
"[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"),
"clicmd"
))
generate.siphon_patterns.append(
(
re.compile(
"(?P<m>VLIB_CLI_COMMAND)\s*" "[(](?P<name>[a-zA-Z0-9_]+)(,[^)]*)?[)]"
),
"clicmd",
)
)
@@ -17,8 +17,12 @@ import re
from . import generate

# Register our regexp
generate.siphon_patterns.append((
re.compile("(?P<m>VLIB_CONFIG_FUNCTION)\s*"
'[(](?P<fn>[a-zA-Z0-9_]+)\s*,\s*"(?P<name>[^"]*)"[)]'),
"syscfg"
))
generate.siphon_patterns.append(
(
re.compile(
"(?P<m>VLIB_CONFIG_FUNCTION)\s*"
'[(](?P<fn>[a-zA-Z0-9_]+)\s*,\s*"(?P<name>[^"]*)"[)]'
),
"syscfg",
)
)
@@ -18,9 +18,10 @@ ident = pp.Word(pp.alphas + "_", pp.alphas + pp.nums + "_")
intNum = pp.Word(pp.nums)
hexNum = pp.Literal("0x") + pp.Word(pp.hexnums)
octalNum = pp.Literal("0") + pp.Word("01234567")
integer = (hexNum | octalNum | intNum) + \
pp.Optional(pp.Literal("ULL") | pp.Literal("LL") | pp.Literal("L"))
floatNum = pp.Regex(r'\d+(\.\d*)?([eE]\d+)?') + pp.Optional(pp.Literal("f"))
integer = (hexNum | octalNum | intNum) + pp.Optional(
pp.Literal("ULL") | pp.Literal("LL") | pp.Literal("L")
)
floatNum = pp.Regex(r"\d+(\.\d*)?([eE]\d+)?") + pp.Optional(pp.Literal("f"))
char = pp.Literal("'") + pp.Word(pp.printables, exact=1) + pp.Literal("'")
arrayIndex = integer | ident

@@ -36,23 +37,29 @@ semicolon = pp.Literal(";").suppress()
# initializer := { [member = ] (variable | expression | { initializer } ) }
typeName = ident
varName = ident
typeSpec = pp.Optional("unsigned") + \
pp.oneOf("int long short float double char u8 i8 void") + \
pp.Optional(pp.Word("*"), default="")
typeCast = pp.Combine( "(" + ( typeSpec | typeName ) + ")" ).suppress()
typeSpec = (
pp.Optional("unsigned")
+ pp.oneOf("int long short float double char u8 i8 void")
+ pp.Optional(pp.Word("*"), default="")
)
typeCast = pp.Combine("(" + (typeSpec | typeName) + ")").suppress()

string = pp.Combine(pp.OneOrMore(pp.QuotedString(quoteChar='"',
escChar='\\', multiline=True)), adjacent=False)
string = pp.Combine(
pp.OneOrMore(pp.QuotedString(quoteChar='"', escChar="\\", multiline=True)),
adjacent=False,
)
literal = pp.Optional(typeCast) + (integer | floatNum | char | string)
var = pp.Combine(pp.Optional(typeCast) + varName +
pp.Optional("[" + arrayIndex + "]"))
var = pp.Combine(pp.Optional(typeCast) + varName + pp.Optional("[" + arrayIndex + "]"))

# This could be more complete, but suffices for our uses
expr = (literal | var)
expr = literal | var

"""Parse and render a block of text into a Python dictionary."""

class Parser(object):
"""Compiled PyParsing BNF"""

_parser = None

def __init__(self):
@@ -71,6 +78,8 @@ class Parser(object):

"""Parser for function-like macros - without the closing semi-colon."""

class ParserFunctionMacro(Parser):
def BNF(self):
# VLIB_CONFIG_FUNCTION (unix_config, "unix")
@@ -91,6 +100,8 @@ class ParserFunctionMacro(Parser):

"""Parser for function-like macros with a closing semi-colon."""

class ParseFunctionMacroStmt(ParserFunctionMacro):
def BNF(self):
# VLIB_CONFIG_FUNCTION (unix_config, "unix");
@@ -106,6 +117,8 @@ Parser for our struct initializers which are composed from a
function-like macro, equals sign, and then a normal C struct initializer
block.
"""

class MacroInitializer(ParserFunctionMacro):
def BNF(self):
# VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = {
@@ -115,14 +128,15 @@ class MacroInitializer(ParserFunctionMacro):
# };
cs = pp.Forward()

member = pp.Combine(dot + varName + pp.Optional("[" + arrayIndex + "]"),
adjacent=False)
value = (expr | cs)
member = pp.Combine(
dot + varName + pp.Optional("[" + arrayIndex + "]"), adjacent=False
)
value = expr | cs

entry = pp.Group(pp.Optional(member + equals, default="") + value)
entries = (pp.ZeroOrMore(entry + comma) + entry + pp.Optional(comma)) | \
(pp.ZeroOrMore(entry + comma))
entries = (pp.ZeroOrMore(entry + comma) + entry + pp.Optional(comma)) | (
pp.ZeroOrMore(entry + comma)
)

cs << (lbrace + entries + rbrace)
@@ -88,7 +88,8 @@ class Siphon(object):
loader=loader,
trim_blocks=True,
autoescape=False,
keep_trailing_newline=True)
keep_trailing_newline=True,
)

# Convenience, get a reference to the internal escape and
# unescape methods in html.parser. These then become
@@ -103,32 +104,38 @@ class Siphon(object):
# Output renderers

"""Returns an object to be used as the sorting key in the item index."""

def index_sort_key(self, group):
return group

"""Returns a string to use as the header at the top of the item index."""

def index_header(self):
return self.template("index_header")

"""Returns the string fragment to use for each section in the item
index."""

def index_section(self, group):
return self.template("index_section", group=group)

"""Returns the string fragment to use for each entry in the item index."""

def index_entry(self, meta, item):
return self.template("index_entry", meta=meta, item=item)

"""Returns an object, typically a string, to be used as the sorting key
for items within a section."""

def item_sort_key(self, item):
return item['name']
return item["name"]

"""Returns a key for grouping items together."""
def group_key(self, directory, file, macro, name):
_global = self._cmds['_global']

if file in _global and 'group_label' in _global[file]:
def group_key(self, directory, file, macro, name):
_global = self._cmds["_global"]

if file in _global and "group_label" in _global[file]:
self._group[file] = (directory, file)
return file

@@ -136,60 +143,59 @@ class Siphon(object):
return directory

"""Returns a key for identifying items within a grouping."""

def item_key(self, directory, file, macro, name):
return name

"""Returns a string to use as the header when rendering the item."""

def item_header(self, group):
return self.template("item_header", group=group)

"""Returns a string to use as the body when rendering the item."""

def item_format(self, meta, item):
return self.template("item_format", meta=meta, item=item)

"""Returns a string to use as the label for the page reference."""

def page_label(self, group):
return "_".join((
self.name,
self.sanitize_label(group)
))
return "_".join((self.name, self.sanitize_label(group)))

"""Returns a title to use for a page."""

def page_title(self, group):
_global = self._cmds['_global']
_global = self._cmds["_global"]
(directory, file) = self._group[group]

if file and file in _global and 'group_label' in _global[file]:
return _global[file]['group_label']
if file and file in _global and "group_label" in _global[file]:
return _global[file]["group_label"]

if directory in _global and 'group_label' in _global[directory]:
return _global[directory]['group_label']
if directory in _global and "group_label" in _global[directory]:
return _global[directory]["group_label"]

return directory

"""Returns a string to use as the label for the section reference."""

def item_label(self, group, item):
return "__".join((
self.name,
item
))
return "__".join((self.name, item))

"""Label sanitizer; for creating Doxygen references"""

def sanitize_label(self, value):
return value.replace(" ", "_") \
.replace("/", "_") \
.replace(".", "_")
return value.replace(" ", "_").replace("/", "_").replace(".", "_")

"""Template processor"""

def template(self, name, **kwargs):
tpl = self._tplenv.get_template(name + self._format.extension)
return tpl.render(
this=self,
**kwargs)
return tpl.render(this=self, **kwargs)

# Processing methods

"""Parse the input file into a more usable dictionary structure."""

def load_json(self, files):
self._cmds = {}
self._group = {}
@@ -198,34 +204,37 @@ class Siphon(object):
line_start = 0
for filename in files:
filename = os.path.relpath(filename)
self.log.info("Parsing items in file \"%s\"." % filename)
self.log.info('Parsing items in file "%s".' % filename)
data = None
with open(filename, "r") as fd:
data = json.load(fd)

self._cmds['_global'] = data['global']
self._cmds["_global"] = data["global"]

# iterate the items loaded and regroup it
for item in data["items"]:
try:
o = self._parser.parse(item['block'])
o = self._parser.parse(item["block"])
except Exception:
self.log.error("Exception parsing item: %s\n%s"
% (json.dumps(item, separators=(',', ': '),
indent=4),
item['block']))
self.log.error(
"Exception parsing item: %s\n%s"
% (
json.dumps(item, separators=(",", ": "), indent=4),
item["block"],
)
)
raise

# Augment the item with metadata
o["meta"] = {}
for key in item:
if key == 'block':
if key == "block":
continue
o['meta'][key] = item[key]
o["meta"][key] = item[key]

# Load some interesting fields
directory = item['directory']
file = item['file']
directory = item["directory"]
file = item["file"]
macro = o["macro"]
name = o["name"]

@@ -240,6 +249,7 @@ class Siphon(object):

"""Iterate over the input data, calling render methods to generate the
output."""

def process(self, out=None):

if out is None:
@@ -257,11 +267,12 @@ class Siphon(object):

# Iterate the dictionary and process it
for group in sorted(self._cmds.keys(), key=group_sort_key):
if group.startswith('_'):
if group.startswith("_"):
continue

self.log.info("Processing items in group \"%s\" (%s)." %
(group, group_sort_key(group)))
self.log.info(
'Processing items in group "%s" (%s).' % (group, group_sort_key(group))
)

# Generate the section index entry (write it now)
out.write(self.index_section(group))
@@ -273,15 +284,16 @@ class Siphon(object):
return self.item_sort_key(self._cmds[group][key])

for key in sorted(self._cmds[group].keys(), key=item_sort_key):
self.log.debug("--- Processing key \"%s\" (%s)." %
(key, item_sort_key(key)))
self.log.debug(
'--- Processing key "%s" (%s).' % (key, item_sort_key(key))
)

o = self._cmds[group][key]
meta = {
"directory": o['meta']['directory'],
"file": o['meta']['file'],
"macro": o['macro'],
"name": o['name'],
"directory": o["meta"]["directory"],
"file": o["meta"]["file"],
"macro": o["macro"],
"name": o["name"],
"key": key,
"label": self.item_label(group, key),
}
@@ -304,7 +316,7 @@ class Siphon(object):

def do_cliexstart(self, matchobj):
title = matchobj.group(1)
title = ' '.join(title.splitlines())
title = " ".join(title.splitlines())
content = matchobj.group(2)
content = re.sub(r"\n", r"\n ", content)
return "\n\n.. code-block:: console\n\n %s\n %s\n\n" % (title, content)
@@ -316,7 +328,7 @@ class Siphon(object):

def do_cliexcmd(self, matchobj):
content = matchobj.group(1)
content = ' '.join(content.splitlines())
content = " ".join(content.splitlines())
return "\n\n.. code-block:: console\n\n %s\n\n" % content

def process_list(self, matchobj):
@@ -351,7 +363,9 @@ class Siphon(object):
s = re.sub(r"@TODO[^\n]*", "", s)
# ----------- code blocks
s = re.sub(r"@cliexcmd{(.+?)}", self.do_cliexcmd, s, flags=re.DOTALL)
s = re.sub(r"@cliexstart{(.+?)}(.+?)@cliexend", self.do_cliexstart, s, flags=re.DOTALL)
s = re.sub(
r"@cliexstart{(.+?)}(.+?)@cliexend", self.do_cliexstart, s, flags=re.DOTALL
)
s = re.sub(r"@clistart(.+?)@cliend", self.do_clistart, s, flags=re.DOTALL)
# ----------- lists
s = re.sub(r"^\s*-", r"\n@@@@", s, flags=re.MULTILINE)
@@ -377,6 +391,7 @@ class Siphon(object):
s = re.sub(r"\n[ \f\v\t]*", "\n", s)
return s

class Format(object):
"""Output format class"""

@@ -389,6 +404,7 @@ class Format(object):

class FormatMarkdown(Format):
"""Markdown output format"""

name = "markdown"
extension = ".md"

@@ -399,6 +415,7 @@ formats["markdown"] = FormatMarkdown

class FormatItemlist(Format):
"""Itemlist output format"""

name = "itemlist"
extension = ".itemlist"
@@ -17,6 +17,7 @@
from . import process, parsers
import os

class SiphonCLICMD(process.Siphon):

name = "clicmd"
@@ -32,37 +33,36 @@ class SiphonCLICMD(process.Siphon):
return self.page_label(group) + ".rst"

def index_sort_key(self, group):
_global = self._cmds['_global']
_global = self._cmds["_global"]
if group not in self._group:
return group
(directory, file) = self._group[group]

if file in _global and 'group_label' in _global[file]:
return _global[file]['group_label']
if file in _global and "group_label" in _global[file]:
return _global[file]["group_label"]

if directory in _global and 'group_label' in _global[directory]:
return _global[directory]['group_label']
if directory in _global and "group_label" in _global[directory]:
return _global[directory]["group_label"]

return group

def item_sort_key(self, item):
return item['value']['path']
return item["value"]["path"]

def item_label(self, group, item):
return "_".join((
self.name,
self.sanitize_label(self._cmds[group][item]['value']['path'])
))
return "_".join(
(self.name, self.sanitize_label(self._cmds[group][item]["value"]["path"]))
)

def page_title(self, group):
_global = self._cmds['_global']
_global = self._cmds["_global"]
(directory, file) = self._group[group]

if file and file in _global and 'group_label' in _global[file]:
return _global[file]['group_label']
if file and file in _global and "group_label" in _global[file]:
return _global[file]["group_label"]

if directory in _global and 'group_label' in _global[directory]:
return _global[directory]['group_label']
if directory in _global and "group_label" in _global[directory]:
return _global[directory]["group_label"]

file_ext = os.path.basename(directory)
fname, ext = os.path.splitext(file_ext)
docs/conf.py (87 lines changed)
@@ -20,14 +20,16 @@ import subprocess

# -- Project information -----------------------------------------------------

project = u'The Vector Packet Processor'
copyright = u'2018-2022, Linux Foundation'
author = u'FD.io VPP Community'
project = "The Vector Packet Processor"
copyright = "2018-2022, Linux Foundation"
author = "FD.io VPP Community"

# The short X.Y version
version = subprocess.run(["git", "describe"], stdout=subprocess.PIPE, text=True).stdout
# The full version, including alpha/beta/rc tags
release = subprocess.run(["git", "describe", "--long"], stdout=subprocess.PIPE, text=True).stdout
release = subprocess.run(
["git", "describe", "--long"], stdout=subprocess.PIPE, text=True
).stdout

# -- General configuration ---------------------------------------------------
@@ -40,29 +42,27 @@ release = subprocess.run(["git", "describe", "--long"], stdout=subprocess.PIPE,
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'recommonmark',
'sphinxcontrib.spelling']
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"recommonmark",
"sphinxcontrib.spelling",
]

spelling_word_list_filename = 'spelling_wordlist.txt'
spelling_word_list_filename = "spelling_wordlist.txt"

# do not spell check those files
spelling_exclude_patterns = ['aboutvpp/releasenotes/*']
spelling_exclude_patterns = ["aboutvpp/releasenotes/*"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown'
}
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}

# The master toctree document.
master_doc = 'index'
master_doc = "index"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -74,10 +74,10 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['Thumbs.db', '.DS_Store', '_scripts', 'venv', '_generated']
exclude_patterns = ["Thumbs.db", ".DS_Store", "_scripts", "venv", "_generated"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
pygments_style = "default"

# -- Options for HTML output -------------------------------------------------
@@ -88,15 +88,17 @@ pygments_style = 'default'

# import sphinx_theme

templates_path = ['_templates']
templates_path = ["_templates"]

html_theme = "sphinx_rtd_theme"

html_theme_path = ["_themes", ]
html_theme_path = [
"_themes",
]

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/fd-io_red_white.png'
html_logo = "_static/fd-io_red_white.png"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -107,11 +109,12 @@ html_logo = '_static/fd-io_red_white.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]

def setup(app):
app.add_css_file('css/rules.css')
app.add_css_file("css/rules.css")

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
@@ -127,7 +130,7 @@ def setup(app):
# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Vector Packet Processor'
htmlhelp_basename = "Vector Packet Processor"

# -- Options for LaTeX output ------------------------------------------------
@@ -136,15 +139,12 @@ latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',

# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
@@ -154,9 +154,13 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Vector Packet Processor.tex',
u'Vector Packet Processor Documentation',
u'John DeNisco', 'manual'),
(
master_doc,
"Vector Packet Processor.tex",
"Vector Packet Processor Documentation",
"John DeNisco",
"manual",
),
]

@@ -165,9 +169,13 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Vector Packet Processor',
u'Vector Packet Processor Documentation',
[author], 1)
(
master_doc,
"Vector Packet Processor",
"Vector Packet Processor Documentation",
[author],
1,
)
]

@@ -177,10 +185,15 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Vector Packet Processor',
u'Vector Packet Processor Documentation',
author, 'Vector Packet Processor', 'One line description of project.',
'Miscellaneous'),
(
master_doc,
"Vector Packet Processor",
"Vector Packet Processor Documentation",
author,
"Vector Packet Processor",
"One line description of project.",
"Miscellaneous",
),
]
@@ -4,48 +4,58 @@ import json, argparse

p = argparse.ArgumentParser()

p.add_argument('-i', '--input', action="store",
help="input JSON file name", required = True)
p.add_argument(
"-i", "--input", action="store", help="input JSON file name", required=True
)

p.add_argument('-o', '--output', action="store",
help="output C file name", required = True)
p.add_argument(
"-o", "--output", action="store", help="output C file name", required=True
)

p.add_argument('-m', '--model', action="append",
help="CPU model in format: model[,stepping0]",
required = True)
p.add_argument(
"-m",
"--model",
action="append",
help="CPU model in format: model[,stepping0]",
required=True,
)

r = p.parse_args()

with open(r.input, 'r') as fp:
with open(r.input, "r") as fp:
objects = json.load(fp)

c = open(r.output, 'w')
c = open(r.output, "w")

c.write ("""
c.write(
"""
#include <perfmon/perfmon_intel.h>

static perfmon_intel_pmc_cpu_model_t cpu_model_table[] = {
""")
"""
)

for v in r.model:
if "," in v:
(m, s) = v.split(",")
(m, s) = v.split(",")
m = int(m, 0)
s = int(s, 0)
c.write (" {}0x{:02X}, 0x{:02X}, 1{},\n".format("{", m, s, "}"))
c.write(" {}0x{:02X}, 0x{:02X}, 1{},\n".format("{", m, s, "}"))
else:
m = int(v, 0)
c.write (" {}0x{:02X}, 0x00, 0{},\n".format("{", m, "}"))
c.write ("""
c.write(" {}0x{:02X}, 0x00, 0{},\n".format("{", m, "}"))
c.write(
"""
};

static perfmon_intel_pmc_event_t event_table[] = {
""")
"""
)

for obj in objects:
MSRIndex = obj["MSRIndex"]
if MSRIndex != "0":
continue
continue

EventCode = obj["EventCode"]
UMask = obj["UMask"]
@@ -53,20 +63,22 @@ for obj in objects:
if "," in EventCode:
continue

c.write (" {\n")
c.write (" .event_code = {}{}{},\n".format("{", EventCode, "}"))
c.write (" .umask = {},\n".format(UMask))
c.write (" .event_name = \"{}\",\n".format(EventName))
c.write (" },\n")
c.write(" {\n")
c.write(" .event_code = {}{}{},\n".format("{", EventCode, "}"))
c.write(" .umask = {},\n".format(UMask))
c.write(' .event_name = "{}",\n'.format(EventName))
c.write(" },\n")

c.write (""" {
c.write(
""" {
.event_name = 0,
},
};

PERFMON_REGISTER_INTEL_PMC (cpu_model_table, event_table);

""")
"""
)

c.close()
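For orientation, the JSON consumed by this generator is a list of event objects; the field names (EventCode, UMask, EventName, MSRIndex) come from the script above, while the values below are purely illustrative:

    # A Python literal mirroring the expected JSON input shape.
    objects = [
        {
            "EventCode": "0x3C",
            "UMask": "0x00",
            "EventName": "CPU_CLK_UNHALTED.THREAD_P",
            "MSRIndex": "0",  # rows with any other MSRIndex are skipped
        },
    ]

Entries whose MSRIndex is not "0", or whose EventCode contains a comma, are skipped by the loops above.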
(One file's diff was suppressed because it is too large.)
@ -16,37 +16,43 @@ from vpp_ip import INVALID_INDEX
|
||||
|
||||
|
||||
class TestVxlanGbp(VppTestCase):
|
||||
""" VXLAN GBP Test Case """
|
||||
"""VXLAN GBP Test Case"""
|
||||
|
||||
@property
|
||||
def frame_request(self):
|
||||
""" Ethernet frame modeling a generic request """
|
||||
return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
|
||||
IP(src='1.2.3.4', dst='4.3.2.1') /
|
||||
UDP(sport=10000, dport=20000) /
|
||||
Raw(b'\xa5' * 100))
|
||||
"""Ethernet frame modeling a generic request"""
|
||||
return (
|
||||
Ether(src="00:00:00:00:00:01", dst="00:00:00:00:00:02")
|
||||
/ IP(src="1.2.3.4", dst="4.3.2.1")
|
||||
/ UDP(sport=10000, dport=20000)
|
||||
/ Raw(b"\xa5" * 100)
|
||||
)
|
||||
|
||||
@property
|
||||
def frame_reply(self):
|
||||
""" Ethernet frame modeling a generic reply """
|
||||
return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
|
||||
IP(src='4.3.2.1', dst='1.2.3.4') /
|
||||
UDP(sport=20000, dport=10000) /
|
||||
Raw(b'\xa5' * 100))
|
||||
"""Ethernet frame modeling a generic reply"""
|
||||
return (
|
||||
Ether(src="00:00:00:00:00:02", dst="00:00:00:00:00:01")
|
||||
/ IP(src="4.3.2.1", dst="1.2.3.4")
|
||||
/ UDP(sport=20000, dport=10000)
|
||||
/ Raw(b"\xa5" * 100)
|
||||
)
|
||||
|
||||
def encapsulate(self, pkt, vni):
|
||||
"""
|
||||
Encapsulate the original payload frame by adding VXLAN GBP header with
|
||||
its UDP, IP and Ethernet fields
|
||||
"""
|
||||
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
|
||||
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
|
||||
UDP(sport=self.dport, dport=self.dport, chksum=0) /
|
||||
VXLAN(vni=vni, flags=self.flags, gpflags=self.gpflags,
|
||||
gpid=self.sclass) / pkt)
|
||||
return (
|
||||
Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
|
||||
/ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4)
|
||||
/ UDP(sport=self.dport, dport=self.dport, chksum=0)
|
||||
/ VXLAN(vni=vni, flags=self.flags, gpflags=self.gpflags, gpid=self.sclass)
|
||||
/ pkt
|
||||
)
|
||||
|
||||
def ip_range(self, start, end):
|
||||
""" range of remote ip's """
|
||||
"""range of remote ip's"""
|
||||
return ip4_range(self.pg0.remote_ip4, start, end)
|
||||
|
||||
def decapsulate(self, pkt):
|
||||
@ -54,7 +60,7 @@ class TestVxlanGbp(VppTestCase):
|
||||
Decapsulate the original payload frame by removing VXLAN header
|
||||
"""
|
||||
# check if is set G and I flag
|
||||
self.assertEqual(pkt[VXLAN].flags, int('0x88', 16))
|
||||
self.assertEqual(pkt[VXLAN].flags, int("0x88", 16))
|
||||
return pkt[VXLAN].payload
|
||||
|
||||
# Method for checking VXLAN GBP encapsulation.
|
||||
@ -94,28 +100,28 @@ class TestVxlanGbp(VppTestCase):
|
||||
ip_range_start = 10
|
||||
ip_range_end = ip_range_start + n_ucast_tunnels
|
||||
next_hop_address = cls.pg0.remote_ip4
|
||||
for dest_ip4 in ip4_range(cls.pg0.remote_ip4,
|
||||
ip_range_start,
|
||||
ip_range_end):
|
||||
for dest_ip4 in ip4_range(cls.pg0.remote_ip4, ip_range_start, ip_range_end):
|
||||
# add host route so dest_ip4 will not be resolved
|
||||
rip = VppIpRoute(cls, dest_ip4, 32,
|
||||
[VppRoutePath(next_hop_address,
|
||||
INVALID_INDEX)],
|
||||
register=False)
|
||||
rip = VppIpRoute(
|
||||
cls,
|
||||
dest_ip4,
|
||||
32,
|
||||
[VppRoutePath(next_hop_address, INVALID_INDEX)],
|
||||
register=False,
|
||||
)
|
||||
rip.add_vpp_config()
|
||||
r = cls.vapi.vxlan_gbp_tunnel_add_del(
|
||||
tunnel={
|
||||
'src': cls.pg0.local_ip4,
|
||||
'dst': dest_ip4,
|
||||
'vni': vni,
|
||||
'instance': INVALID_INDEX,
|
||||
'mcast_sw_if_index': INVALID_INDEX,
|
||||
'mode': 1,
|
||||
"src": cls.pg0.local_ip4,
|
||||
"dst": dest_ip4,
|
||||
"vni": vni,
|
||||
"instance": INVALID_INDEX,
|
||||
"mcast_sw_if_index": INVALID_INDEX,
|
||||
"mode": 1,
|
||||
},
|
||||
is_add=1
|
||||
is_add=1,
|
||||
)
|
||||
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
|
||||
bd_id=vni)
|
||||
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index, bd_id=vni)
|
||||
|
||||
# Class method to start the VXLAN GBP test case.
|
||||
# Overrides setUpClass method in VppTestCase class.
|
||||
@ -146,33 +152,33 @@ class TestVxlanGbp(VppTestCase):
|
||||
# Create VXLAN GBP VTEP on VPP pg0, and put vxlan_gbp_tunnel0 and
|
||||
# pg1 into BD.
|
||||
cls.single_tunnel_bd = 1
|
||||
cls.single_tunnel_vni = 0xabcde
|
||||
cls.single_tunnel_vni = 0xABCDE
|
||||
r = cls.vapi.vxlan_gbp_tunnel_add_del(
|
||||
tunnel={
|
||||
'src': cls.pg0.local_ip4,
|
||||
'dst': cls.pg0.remote_ip4,
|
||||
'vni': cls.single_tunnel_vni,
|
||||
'instance': INVALID_INDEX,
|
||||
'mcast_sw_if_index': INVALID_INDEX,
|
||||
'mode': 1,
|
||||
"src": cls.pg0.local_ip4,
|
||||
"dst": cls.pg0.remote_ip4,
|
||||
"vni": cls.single_tunnel_vni,
|
||||
"instance": INVALID_INDEX,
|
||||
"mcast_sw_if_index": INVALID_INDEX,
|
||||
"mode": 1,
|
||||
},
|
||||
is_add=1
|
||||
is_add=1,
|
||||
)
|
||||
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
|
||||
bd_id=cls.single_tunnel_bd)
|
||||
cls.vapi.sw_interface_set_l2_bridge(
|
||||
rx_sw_if_index=cls.pg1.sw_if_index,
|
||||
bd_id=cls.single_tunnel_bd)
|
||||
rx_sw_if_index=r.sw_if_index, bd_id=cls.single_tunnel_bd
|
||||
)
|
||||
cls.vapi.sw_interface_set_l2_bridge(
|
||||
rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd
|
||||
)
|
||||
|
||||
# Setup vni 2 to test multicast flooding
|
||||
cls.n_ucast_tunnels = 2
|
||||
# Setup vni 3 to test unicast flooding
|
||||
cls.ucast_flood_bd = 3
|
||||
cls.create_vxlan_gbp_flood_test_bd(cls.ucast_flood_bd,
|
||||
cls.n_ucast_tunnels)
|
||||
cls.create_vxlan_gbp_flood_test_bd(cls.ucast_flood_bd, cls.n_ucast_tunnels)
|
||||
cls.vapi.sw_interface_set_l2_bridge(
|
||||
rx_sw_if_index=cls.pg3.sw_if_index,
|
||||
bd_id=cls.ucast_flood_bd)
|
||||
rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd
|
||||
)
|
||||
except Exception:
|
||||
super(TestVxlanGbp, cls).tearDownClass()
|
||||
raise
|
||||
@ -182,7 +188,7 @@ class TestVxlanGbp(VppTestCase):
|
||||
super(TestVxlanGbp, cls).tearDownClass()
|
||||
|
||||
def assert_eq_pkts(self, pkt1, pkt2):
|
||||
""" Verify the Ether, IP, UDP, payload are equal in both
|
||||
"""Verify the Ether, IP, UDP, payload are equal in both
|
||||
packets
|
||||
"""
|
||||
self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
|
||||
@ -194,14 +200,17 @@ class TestVxlanGbp(VppTestCase):
|
||||
self.assertEqual(pkt1[Raw], pkt2[Raw])
|
||||
|
||||
def test_decap(self):
|
||||
""" Decapsulation test
|
||||
"""Decapsulation test
|
||||
Send encapsulated frames from pg0
|
||||
Verify receipt of decapsulated frames on pg1
|
||||
"""
|
||||
encapsulated_pkt = self.encapsulate(self.frame_request,
|
||||
self.single_tunnel_vni)
|
||||
encapsulated_pkt = self.encapsulate(self.frame_request, self.single_tunnel_vni)
|
||||
|
||||
self.pg0.add_stream([encapsulated_pkt, ])
|
||||
self.pg0.add_stream(
|
||||
[
|
||||
encapsulated_pkt,
|
||||
]
|
||||
)
|
||||
|
||||
self.pg1.enable_capture()
|
||||
|
||||
@ -214,7 +223,7 @@ class TestVxlanGbp(VppTestCase):
self.assert_eq_pkts(pkt, self.frame_request)

def test_encap(self):
""" Encapsulation test
"""Encapsulation test
Send frames from pg1
Verify receipt of encapsulated frames on pg0
"""
@ -233,7 +242,7 @@ class TestVxlanGbp(VppTestCase):
self.assert_eq_pkts(payload, self.frame_reply)

def test_ucast_flood(self):
""" Unicast flood test
"""Unicast flood test
Send frames from pg3
Verify receipt of encapsulated frames on pg0
"""
@ -251,16 +260,18 @@ class TestVxlanGbp(VppTestCase):
self.assert_eq_pkts(payload, self.frame_reply)

def test_encap_big_packet(self):
""" Encapsulation test send big frame from pg1
"""Encapsulation test send big frame from pg1
Verify receipt of encapsulated frames on pg0
"""

self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [1500, 0, 0, 0])

frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
IP(src='4.3.2.1', dst='1.2.3.4') /
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 1450))
frame = (
Ether(src="00:00:00:00:00:02", dst="00:00:00:00:00:01")
/ IP(src="4.3.2.1", dst="1.2.3.4")
/ UDP(sport=20000, dport=10000)
/ Raw(b"\xa5" * 1450)
)

self.pg1.add_stream([frame])

@ -276,9 +287,9 @@ class TestVxlanGbp(VppTestCase):
payload = self.decapsulate(pkt)
self.assert_eq_pkts(payload, frame)

# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestVxlanGbp, self).tearDown()

@ -289,5 +300,5 @@ class TestVxlanGbp(VppTestCase):
self.logger.info(self.vapi.cli("show error"))


if __name__ == '__main__':
if __name__ == "__main__":
unittest.main(testRunner=VppTestRunner)
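The docstring and __main__ hunks in this file show two normalizations at once: single-quoted strings become double-quoted, and padding just inside a docstring's triple quotes is stripped. A hedged before/after sketch with a throwaway function:

    def before():
        """ Padded docstring, single quotes elsewhere. """
        return 'done'

    def after():
        """Padding stripped; strings double-quoted."""
        return "done"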
@ -1,17 +1,18 @@

from vpp_interface import VppInterface
from vpp_papi import VppEnum


INDEX_INVALID = 0xffffffff
INDEX_INVALID = 0xFFFFFFFF


def find_vxlan_gbp_tunnel(test, src, dst, vni):
ts = test.vapi.vxlan_gbp_tunnel_dump(INDEX_INVALID)
for t in ts:
if src == str(t.tunnel.src) and \
dst == str(t.tunnel.dst) and \
t.tunnel.vni == vni:
if (
src == str(t.tunnel.src)
and dst == str(t.tunnel.dst)
and t.tunnel.vni == vni
):
return t.tunnel.sw_if_index
return INDEX_INVALID
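find_vxlan_gbp_tunnel() above trades backslash continuations for a parenthesized condition with one clause per line. A small sketch with placeholder variables; note black keeps the clauses split only when the joined form would overflow the line length:

    a, b, c = 1, 2, 3

    # Old style: clauses joined with backslashes.
    if a == 1 and \
            b == 2 and \
            c == 3:
        print("match")

    # black prefers parentheses; a condition this short is simply joined.
    if a == 1 and b == 2 and c == 3:
        print("match")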
@ -21,9 +22,19 @@ class VppVxlanGbpTunnel(VppInterface):
VPP VXLAN GBP interface
"""

def __init__(self, test, src, dst, vni, mcast_itf=None, mode=None,
is_ipv6=None, encap_table_id=None, instance=0xffffffff):
""" Create VXLAN-GBP Tunnel interface """
def __init__(
self,
test,
src,
dst,
vni,
mcast_itf=None,
mode=None,
is_ipv6=None,
encap_table_id=None,
instance=0xFFFFFFFF,
):
"""Create VXLAN-GBP Tunnel interface"""
super(VppVxlanGbpTunnel, self).__init__(test)
self.src = src
self.dst = dst
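The __init__() hunk above shows black's treatment of a signature that no longer fits on one line: every parameter moves to its own line and gains a trailing comma. A sketch with a hypothetical helper (the 88-column default decides which form you get):

    def make_tunnel(
        test,
        src,
        dst,
        vni,
        mcast_itf=None,
        mode=None,
        is_ipv6=None,
        encap_table_id=None,
        instance=0xFFFFFFFF,
    ):
        """Hypothetical helper; black explodes the signature because the
        one-line form would exceed the line-length limit."""
        return (test, src, dst, vni, mcast_itf, mode, is_ipv6, encap_table_id)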
@ -33,21 +44,23 @@ class VppVxlanGbpTunnel(VppInterface):
self.encap_table_id = encap_table_id
self.instance = instance
if not mode:
self.mode = (VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
VXLAN_GBP_API_TUNNEL_MODE_L2)
self.mode = (
VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.VXLAN_GBP_API_TUNNEL_MODE_L2
)
else:
self.mode = mode

def encode(self):
return {
'src': self.src,
'dst': self.dst,
'mode': self.mode,
'vni': self.vni,
'mcast_sw_if_index': self.mcast_itf.sw_if_index
if self.mcast_itf else INDEX_INVALID,
'encap_table_id': self.encap_table_id,
'instance': self.instance,
"src": self.src,
"dst": self.dst,
"mode": self.mode,
"vni": self.vni,
"mcast_sw_if_index": self.mcast_itf.sw_if_index
if self.mcast_itf
else INDEX_INVALID,
"encap_table_id": self.encap_table_id,
"instance": self.instance,
}

def add_vpp_config(self):
@ -65,11 +78,14 @@ class VppVxlanGbpTunnel(VppInterface):
)

def query_vpp_config(self):
return (INDEX_INVALID != find_vxlan_gbp_tunnel(self._test,
self.src,
self.dst,
self.vni))
return INDEX_INVALID != find_vxlan_gbp_tunnel(
self._test, self.src, self.dst, self.vni
)

def object_id(self):
return "vxlan-gbp-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
self.src, self.dst)
return "vxlan-gbp-%d-%d-%s-%s" % (
self.sw_if_index,
self.vni,
self.src,
self.dst,
)
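query_vpp_config() and object_id() above show the inverse move: calls that were hand-wrapped for the old 79-column limit are joined whenever the result fits within black's default 88 columns. Sketch with a placeholder function:

    def lookup(test, src, dst, vni):
        """Placeholder standing in for find_vxlan_gbp_tunnel()."""
        return -1

    # Previously wrapped one argument per line for pycodestyle; black
    # joins the call because it fits within 88 columns:
    found = lookup("test", "10.0.0.1", "10.0.0.2", 101)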
@ -4,13 +4,12 @@
import unittest
import os
import signal
from framework import VppTestCase, running_extended_tests, \
VppTestRunner, Worker
from framework import VppTestCase, running_extended_tests, VppTestRunner, Worker


@unittest.skipUnless(running_extended_tests, "part of extended tests")
class VOMTestCase(VppTestCase):
""" VPP Object Model Test """
"""VPP Object Model Test"""

@classmethod
def setUpClass(cls):
@ -21,14 +20,14 @@ class VOMTestCase(VppTestCase):
super(VOMTestCase, cls).tearDownClass()

def test_vom_cpp(self):
""" run C++ VOM tests """
"""run C++ VOM tests"""
var = "TEST_BR"
built_root = os.getenv(var, None)
self.assertIsNotNone(built_root,
"Environment variable `%s' not set" % var)
self.assertIsNotNone(built_root, "Environment variable `%s' not set" % var)
executable = "%s/vom_test/vom_test" % built_root
worker = Worker([executable, "vpp object model",
self.get_api_segment_prefix()], self.logger)
worker = Worker(
[executable, "vpp object model", self.get_api_segment_prefix()], self.logger
)
worker.start()
timeout = 120
worker.join(timeout)
@ -37,17 +36,15 @@ class VOMTestCase(VppTestCase):
if worker.result is None:
try:
error = True
self.logger.error(
"Timeout! Worker did not finish in %ss" % timeout)
self.logger.error("Timeout! Worker did not finish in %ss" % timeout)
os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
worker.join()
except:
raise Exception("Couldn't kill worker-spawned process")
if error:
raise Exception(
"Timeout! Worker did not finish in %ss" % timeout)
raise Exception("Timeout! Worker did not finish in %ss" % timeout)
self.assert_equal(worker.result, 0, "Binary test return code")


if __name__ == '__main__':
if __name__ == "__main__":
unittest.main(testRunner=VppTestRunner)
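The framework import above gets the same treatment: the backslash-continued import is joined because it fits on one line. Sketch with standard-library names:

    # Fits on one line, so it stays on one line:
    from os.path import basename, dirname, exists, join

    # With a magic trailing comma, the parenthesized form stays exploded
    # even though it would fit on one line:
    from os.path import (
        abspath,
        commonprefix,
        expanduser,
        realpath,
    )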
File diff suppressed because it is too large
@ -4,21 +4,21 @@ import fnmatch
import os
import subprocess

starttag = 'v19.08-rc0'
endtag = 'HEAD'
starttag = "v19.08-rc0"
endtag = "HEAD"
emit_md = True
apifiles = []

for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*.api'):
for root, dirnames, filenames in os.walk("."):
for filename in fnmatch.filter(filenames, "*.api"):
apifiles.append(os.path.join(root, filename))

for f in apifiles:
commits = subprocess.check_output(['git', 'log',
'--oneline', starttag + '..' + endtag,
f])
commits = subprocess.check_output(
["git", "log", "--oneline", starttag + ".." + endtag, f]
)
if commits:
if f[0:2] == './':
if f[0:2] == "./":
f = f[2:]
if emit_md:
print("| @c %s ||" % f)
@ -27,9 +27,10 @@ for f in apifiles:
parts = line.strip().split()
commit = parts[0]
message = b" ".join(parts[1:]).decode().replace("|", r"\|")
print("| [%s](https://gerrit.fd.io/r/gitweb?"
"p=vpp.git;a=commit;h=%s) | %s |" % (
commit, commit, message))
print(
"| [%s](https://gerrit.fd.io/r/gitweb?"
"p=vpp.git;a=commit;h=%s) | %s |" % (commit, commit, message)
)
print()
else:
print(f)
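In the gerrit-link print above, black re-wraps the call but leaves the two adjacent string literals as an implicit concatenation; it does not merge them into one literal. Sketch with placeholder values:

    commit = "0123abc"
    message = "example subject"

    print(
        "| [%s](https://gerrit.fd.io/r/gitweb?"
        "p=vpp.git;a=commit;h=%s) | %s |" % (commit, commit, message)
    )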
File diff suppressed because it is too large
@ -1,33 +1,31 @@
from setuptools import setup

setup(name="vpp_config",
version="20.05.1",
author="John DeNisco",
author_email="jdenisco@cisco.com",
description="VPP Configuration Utility",
license='Apache-2.0',
keywords="vppconfig",
url='https://wiki.fd.io/view/VPP',
py_modules=['vpp_config'],
install_requires=['distro', 'pyyaml', 'requests'],
extra_requires=["ipaddress; python_version < '3.3'"],
packages=['vpplib'],
entry_points={
'console_scripts': ['vpp-config=vpp_config:config_main'],
},
data_files=[('vpp/vpp-config/scripts', ['scripts/dpdk-devbind.py']),
('vpp/vpp-config/configs', ['data/auto-config.yaml']),
('vpp/vpp-config/configs', ['data/cloud-config.iso']),
('vpp/vpp-config/configs',
['data/iperf-centos.xml.template']),
('vpp/vpp-config/configs',
['data/iperf-ubuntu.xml.template']),
('vpp/vpp-config/dryrun/sysctl.d',
['data/80-vpp.conf.template']),
('vpp/vpp-config/dryrun/default', ['data/grub.template']),
('vpp/vpp-config/dryrun/vpp',
['data/startup.conf.template']),
],
long_description="The VPP configuration utility can be used to "
"easily configure VPP.",
)
setup(
name="vpp_config",
version="20.05.1",
author="John DeNisco",
author_email="jdenisco@cisco.com",
description="VPP Configuration Utility",
license="Apache-2.0",
keywords="vppconfig",
url="https://wiki.fd.io/view/VPP",
py_modules=["vpp_config"],
install_requires=["distro", "pyyaml", "requests"],
extra_requires=["ipaddress; python_version < '3.3'"],
packages=["vpplib"],
entry_points={
"console_scripts": ["vpp-config=vpp_config:config_main"],
},
data_files=[
("vpp/vpp-config/scripts", ["scripts/dpdk-devbind.py"]),
("vpp/vpp-config/configs", ["data/auto-config.yaml"]),
("vpp/vpp-config/configs", ["data/cloud-config.iso"]),
("vpp/vpp-config/configs", ["data/iperf-centos.xml.template"]),
("vpp/vpp-config/configs", ["data/iperf-ubuntu.xml.template"]),
("vpp/vpp-config/dryrun/sysctl.d", ["data/80-vpp.conf.template"]),
("vpp/vpp-config/dryrun/default", ["data/grub.template"]),
("vpp/vpp-config/dryrun/vpp", ["data/startup.conf.template"]),
],
long_description="The VPP configuration utility can be used to "
"easily configure VPP.",
)
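The setup.py rewrite combines the patterns seen so far: setup() is exploded one keyword per line, and the nested data_files list becomes one tuple per line, each with a trailing comma. A reduced sketch with placeholder metadata, not the real vpp_config values:

    from setuptools import setup

    setup(
        name="example_pkg",
        version="0.0.1",
        data_files=[
            ("share/example/configs", ["data/a.yaml"]),
            ("share/example/configs", ["data/b.iso"]),
        ],
    )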
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -78,13 +78,14 @@ class CpuUtils(object):
# 1,1,0,0,,1,1,1,0
if ret != 0:
raise RuntimeError(
"Failed to execute ssh command, ret: {} err: {}".format(
ret, stderr))
node['cpuinfo'] = list()
"Failed to execute ssh command, ret: {} err: {}".format(ret, stderr)
)
node["cpuinfo"] = list()
for line in stdout.split("\n"):
if line != '' and line[0] != "#":
node['cpuinfo'].append([CpuUtils.__str2int(x) for x in
line.split(",")])
if line != "" and line[0] != "#":
node["cpuinfo"].append(
[CpuUtils.__str2int(x) for x in line.split(",")]
)

@staticmethod
def cpu_node_count(node):
@ -137,13 +138,14 @@ class CpuUtils(object):

if smt_enabled and not smt_used:
cpu_list_len = len(cpu_list)
cpu_list = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list = cpu_list[: cpu_list_len // CpuUtils.NR_OF_THREADS]

return cpu_list

@staticmethod
def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0,
smt_used=False):
def cpu_slice_of_list_per_node(
node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False
):
"""Return string of node related list of CPU numbers.

:param node: Node dictionary with cpuinfo.
@ -171,20 +173,20 @@ class CpuUtils(object):
cpu_cnt = cpu_list_len - skip_cnt

if smt_used:
cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]]
cpu_list_ex = [cpu for cpu in
cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]]
cpu_list_0 = cpu_list[: cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS :]
cpu_list = [cpu for cpu in cpu_list_0[skip_cnt : skip_cnt + cpu_cnt]]
cpu_list_ex = [cpu for cpu in cpu_list_1[skip_cnt : skip_cnt + cpu_cnt]]
cpu_list.extend(cpu_list_ex)
else:
cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]]
cpu_list = [cpu for cpu in cpu_list[skip_cnt : skip_cnt + cpu_cnt]]

return cpu_list

@staticmethod
def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
smt_used=False):
def cpu_list_per_node_str(
node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",", smt_used=False
):
"""Return string of node related list of CPU numbers.

:param node: Node dictionary with cpuinfo.
@ -203,15 +205,15 @@ class CpuUtils(object):
:rtype: str
"""

cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
skip_cnt=skip_cnt,
cpu_cnt=cpu_cnt,
smt_used=smt_used)
cpu_list = CpuUtils.cpu_slice_of_list_per_node(
node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, smt_used=smt_used
)
return sep.join(str(cpu) for cpu in cpu_list)

@staticmethod
def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-",
smt_used=False):
def cpu_range_per_node_str(
node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-", smt_used=False
):
"""Return string of node related range of CPU numbers, e.g. 0-4.

:param node: Node dictionary with cpuinfo.
@ -230,18 +232,16 @@ class CpuUtils(object):
:rtype: str
"""

cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
skip_cnt=skip_cnt,
cpu_cnt=cpu_cnt,
smt_used=smt_used)
cpu_list = CpuUtils.cpu_slice_of_list_per_node(
node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt, smt_used=smt_used
)
if smt_used:
cpu_list_len = len(cpu_list)
cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep,
cpu_list_0[-1],
cpu_list_1[0], sep,
cpu_list_1[-1])
cpu_list_0 = cpu_list[: cpu_list_len // CpuUtils.NR_OF_THREADS]
cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS :]
cpu_range = "{}{}{},{}{}{}".format(
cpu_list_0[0], sep, cpu_list_0[-1], cpu_list_1[0], sep, cpu_list_1[-1]
)
else:
cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])

@ -260,28 +260,30 @@ class CpuUtils(object):
cmd = "lscpu"
ret, stdout, stderr = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError("lscpu command failed on node {} {}."
.format(node['host'], stderr))
raise RuntimeError(
"lscpu command failed on node {} {}.".format(node["host"], stderr)
)

cpuinfo = {}
lines = stdout.split('\n')
lines = stdout.split("\n")
for line in lines:
if line != '':
linesplit = re.split(r':\s+', line)
if line != "":
linesplit = re.split(r":\s+", line)
cpuinfo[linesplit[0]] = linesplit[1]

cmd = "cat /proc/*/task/*/stat | awk '{print $1" "$2" "$39}'"
ret, stdout, stderr = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError("cat command failed on node {} {}."
.format(node['host'], stderr))
raise RuntimeError(
"cat command failed on node {} {}.".format(node["host"], stderr)
)

vpp_processes = {}
vpp_lines = re.findall(r'\w+\(vpp_\w+\)\w+', stdout)
vpp_lines = re.findall(r"\w+\(vpp_\w+\)\w+", stdout)
for line in vpp_lines:
linesplit = re.split(r'\w+\(', line)[1].split(')')
linesplit = re.split(r"\w+\(", line)[1].split(")")
vpp_processes[linesplit[0]] = linesplit[1]

cpuinfo['vpp_processes'] = vpp_processes
cpuinfo["vpp_processes"] = vpp_processes

return cpuinfo
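The slice changes in CpuUtils are deliberate black style: when a slice bound is a compound expression, black adds spaces around the colon, treating the bounds like operands of a binary operator. pycodestyle flags exactly this as E203, one of the points where the two tools disagree. Sketch:

    cpus = list(range(16))
    skip_cnt, cpu_cnt, threads = 2, 4, 2

    simple = cpus[:4]                    # plain bound: no space
    head = cpus[: len(cpus) // threads]  # compound bound: spaced colon
    window = cpus[skip_cnt : skip_cnt + cpu_cnt]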
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -17,11 +17,11 @@ import re

from vpplib.VPPUtil import VPPUtil

__all__ = ['VppGrubUtil']
__all__ = ["VppGrubUtil"]


class VppGrubUtil(object):
""" VPP Grub Utilities."""
"""VPP Grub Utilities."""

def _get_current_cmdline(self):
"""
@ -32,14 +32,14 @@ class VppGrubUtil(object):
"""

# Get the memory information using /proc/meminfo
cmd = 'sudo cat /proc/cmdline'
cmd = "sudo cat /proc/cmdline"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} on node {} {} {}'.
format(cmd, self._node['host'],
stdout, stderr))
raise RuntimeError(
"{} on node {} {} {}".format(cmd, self._node["host"], stdout, stderr)
)

self._current_cmdline = stdout.strip('\n')
self._current_cmdline = stdout.strip("\n")

def _get_default_cmdline(self):
"""
@ -50,21 +50,24 @@ class VppGrubUtil(object):
"""

# Get the default grub cmdline
rootdir = self._node['rootdir']
gfile = self._node['cpu']['grub_config_file']
grubcmdline = self._node['cpu']['grubcmdline']
cmd = 'cat {}'.format(rootdir + gfile)
rootdir = self._node["rootdir"]
gfile = self._node["cpu"]["grub_config_file"]
grubcmdline = self._node["cpu"]["grubcmdline"]
cmd = "cat {}".format(rootdir + gfile)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} Executing failed on node {} {}'.
format(cmd, self._node['host'], stderr))
raise RuntimeError(
"{} Executing failed on node {} {}".format(
cmd, self._node["host"], stderr
)
)

# Get the Default Linux command line, ignoring commented lines
lines = stdout.split('\n')
lines = stdout.split("\n")
for line in lines:
if line == '' or line[0] == '#':
if line == "" or line[0] == "#":
continue
ldefault = re.findall(r'{}=.+'.format(grubcmdline), line)
ldefault = re.findall(r"{}=.+".format(grubcmdline), line)
if ldefault:
self._default_cmdline = ldefault[0]
break
@ -96,9 +99,9 @@ class VppGrubUtil(object):
:returns: The command line
:rtype: string
"""
grubcmdline = self._node['cpu']['grubcmdline']
grubcmdline = self._node["cpu"]["grubcmdline"]
cmdline = self._default_cmdline
value = cmdline.split('{}='.format(grubcmdline))[1]
value = cmdline.split("{}=".format(grubcmdline))[1]
value = value.rstrip('"').lstrip('"')

# jadfix intel_pstate=disable sometimes cause networks to
@ -111,43 +114,43 @@ class VppGrubUtil(object):
# value = '{} intel_pstate=disable'.format(value)

# Replace isolcpus with ours
isolcpus = re.findall(r'isolcpus=[\w+\-,]+', value)
isolcpus = re.findall(r"isolcpus=[\w+\-,]+", value)
if not isolcpus:
if isolated_cpus != '':
if isolated_cpus != "":
value = "{} isolcpus={}".format(value, isolated_cpus)
else:
if isolated_cpus != '':
value = re.sub(r'isolcpus=[\w+\-,]+',
'isolcpus={}'.format(isolated_cpus),
value)
if isolated_cpus != "":
value = re.sub(
r"isolcpus=[\w+\-,]+", "isolcpus={}".format(isolated_cpus), value
)
else:
value = re.sub(r'isolcpus=[\w+\-,]+', '', value)
value = re.sub(r"isolcpus=[\w+\-,]+", "", value)

nohz = re.findall(r'nohz_full=[\w+\-,]+', value)
nohz = re.findall(r"nohz_full=[\w+\-,]+", value)
if not nohz:
if isolated_cpus != '':
if isolated_cpus != "":
value = "{} nohz_full={}".format(value, isolated_cpus)
else:
if isolated_cpus != '':
value = re.sub(r'nohz_full=[\w+\-,]+',
'nohz_full={}'.format(isolated_cpus),
value)
if isolated_cpus != "":
value = re.sub(
r"nohz_full=[\w+\-,]+", "nohz_full={}".format(isolated_cpus), value
)
else:
value = re.sub(r'nohz_full=[\w+\-,]+', '', value)
value = re.sub(r"nohz_full=[\w+\-,]+", "", value)

rcu = re.findall(r'rcu_nocbs=[\w+\-,]+', value)
rcu = re.findall(r"rcu_nocbs=[\w+\-,]+", value)
if not rcu:
if isolated_cpus != '':
if isolated_cpus != "":
value = "{} rcu_nocbs={}".format(value, isolated_cpus)
else:
if isolated_cpus != '':
value = re.sub(r'rcu_nocbs=[\w+\-,]+',
'rcu_nocbs={}'.format(isolated_cpus),
value)
if isolated_cpus != "":
value = re.sub(
r"rcu_nocbs=[\w+\-,]+", "rcu_nocbs={}".format(isolated_cpus), value
)
else:
value = re.sub(r'rcu_nocbs=[\w+\-,]+', '', value)
value = re.sub(r"rcu_nocbs=[\w+\-,]+", "", value)

value = value.lstrip(' ').rstrip(' ')
value = value.lstrip(" ").rstrip(" ")
cmdline = '{}="{}"'.format(grubcmdline, value)
return cmdline

@ -167,69 +170,68 @@ class VppGrubUtil(object):
if len(vpp_cmdline):
# Update grub
# Save the original file
rootdir = node['rootdir']
grubcmdline = node['cpu']['grubcmdline']
ofilename = rootdir + node['cpu']['grub_config_file'] + '.orig'
filename = rootdir + node['cpu']['grub_config_file']
rootdir = node["rootdir"]
grubcmdline = node["cpu"]["grubcmdline"]
ofilename = rootdir + node["cpu"]["grub_config_file"] + ".orig"
filename = rootdir + node["cpu"]["grub_config_file"]

# Write the output file
# Does a copy of the original file exist, if not create one
(ret, stdout, stderr) = VPPUtil.exec_command(
'ls {}'.format(ofilename))
(ret, stdout, stderr) = VPPUtil.exec_command("ls {}".format(ofilename))
if ret != 0:
if stdout.strip('\n') != ofilename:
cmd = 'sudo cp {} {}'.format(filename, ofilename)
if stdout.strip("\n") != ofilename:
cmd = "sudo cp {} {}".format(filename, ofilename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {}'.
format(cmd, self._node['host'],
stderr))
raise RuntimeError(
"{} failed on node {} {}".format(
cmd, self._node["host"], stderr
)
)

# Get the contents of the current grub config file
cmd = 'cat {}'.format(filename)
cmd = "cat {}".format(filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {}'.format(
cmd,
self._node['host'],
stderr))
raise RuntimeError(
"{} failed on node {} {}".format(cmd, self._node["host"], stderr)
)

# Write the new contents
# Get the Default Linux command line, ignoring commented lines
content = ""
lines = stdout.split('\n')
lines = stdout.split("\n")
for line in lines:
if line == '':
content += line + '\n'
if line == "":
content += line + "\n"
continue
if line[0] == '#':
content += line + '\n'
if line[0] == "#":
content += line + "\n"
continue

ldefault = re.findall(r'{}=.+'.format(grubcmdline), line)
ldefault = re.findall(r"{}=.+".format(grubcmdline), line)
if ldefault:
content += vpp_cmdline + '\n'
content += vpp_cmdline + "\n"
else:
content += line + '\n'
content += line + "\n"

content = content.replace(r"`", r"\`")
content = content.rstrip('\n')
content = content.rstrip("\n")
cmd = "sudo cat > {0} << EOF\n{1}\n".format(filename, content)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {}'.format(
cmd,
self._node['host'],
stderr))
raise RuntimeError(
"{} failed on node {} {}".format(cmd, self._node["host"], stderr)
)

return vpp_cmdline

def __init__(self, node):
distro = VPPUtil.get_linux_distro()
if distro[0] == 'Ubuntu':
node['cpu']['grubcmdline'] = 'GRUB_CMDLINE_LINUX_DEFAULT'
if distro[0] == "Ubuntu":
node["cpu"]["grubcmdline"] = "GRUB_CMDLINE_LINUX_DEFAULT"
else:
node['cpu']['grubcmdline'] = 'GRUB_CMDLINE_LINUX'
node["cpu"]["grubcmdline"] = "GRUB_CMDLINE_LINUX"

self._node = node
self._current_cmdline = ""
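Not every string in this file flipped to double quotes: cmdline = '{}="{}"'.format(...) stays single-quoted, because black avoids a quote change that would force new escape characters. Sketch:

    grubcmdline = "GRUB_CMDLINE_LINUX"
    value = "isolcpus=1-4"

    # Kept single-quoted: switching would mean escaping the inner quotes.
    cmdline = '{}="{}"'.format(grubcmdline, value)

    # Flipped to double quotes: nothing to escape.
    plain = "isolcpus replaced"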
@ -33,6 +33,7 @@ class VppHugePageUtil(object):
"""
Huge Page Utilities
"""

def hugepages_dryrun_apply(self):
"""
Apply the huge page configuration
@ -40,23 +41,23 @@ class VppHugePageUtil(object):
"""

node = self._node
hugepages = node['hugepages']
hugepages = node["hugepages"]

vpp_hugepage_config = VPP_HUGEPAGE_CONFIG.format(
nr_hugepages=hugepages['total'],
max_map_count=hugepages['max_map_count'],
shmmax=hugepages['shmax'])
nr_hugepages=hugepages["total"],
max_map_count=hugepages["max_map_count"],
shmmax=hugepages["shmax"],
)

rootdir = node['rootdir']
filename = rootdir + node['hugepages']['hugepage_config_file']
rootdir = node["rootdir"]
filename = rootdir + node["hugepages"]["hugepage_config_file"]

cmd = 'echo "{0}" | sudo tee {1}'.\
format(vpp_hugepage_config, filename)
cmd = 'echo "{0}" | sudo tee {1}'.format(vpp_hugepage_config, filename)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {} {}'.
format(cmd, node['host'],
stdout, stderr))
raise RuntimeError(
"{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
)

def get_actual_huge_pages(self):
"""
@ -68,25 +69,26 @@ class VppHugePageUtil(object):
"""

# Get the memory information using /proc/meminfo
cmd = 'sudo cat /proc/meminfo'
cmd = "sudo cat /proc/meminfo"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError(
'{} failed on node {} {} {}'.format(
cmd, self._node['host'],
stdout, stderr))
"{} failed on node {} {} {}".format(
cmd, self._node["host"], stdout, stderr
)
)

total = re.findall(r'HugePages_Total:\s+\w+', stdout)
free = re.findall(r'HugePages_Free:\s+\w+', stdout)
size = re.findall(r'Hugepagesize:\s+\w+\s+\w+', stdout)
memtotal = re.findall(r'MemTotal:\s+\w+\s+\w+', stdout)
memfree = re.findall(r'MemFree:\s+\w+\s+\w+', stdout)
total = re.findall(r"HugePages_Total:\s+\w+", stdout)
free = re.findall(r"HugePages_Free:\s+\w+", stdout)
size = re.findall(r"Hugepagesize:\s+\w+\s+\w+", stdout)
memtotal = re.findall(r"MemTotal:\s+\w+\s+\w+", stdout)
memfree = re.findall(r"MemFree:\s+\w+\s+\w+", stdout)

total = total[0].split(':')[1].lstrip()
free = free[0].split(':')[1].lstrip()
size = size[0].split(':')[1].lstrip()
memtotal = memtotal[0].split(':')[1].lstrip()
memfree = memfree[0].split(':')[1].lstrip()
total = total[0].split(":")[1].lstrip()
free = free[0].split(":")[1].lstrip()
size = size[0].split(":")[1].lstrip()
memtotal = memtotal[0].split(":")[1].lstrip()
memfree = memfree[0].split(":")[1].lstrip()
return total, free, size, memtotal, memfree

def show_huge_pages(self):
@ -96,17 +98,13 @@ class VppHugePageUtil(object):
"""

node = self._node
hugepages = node['hugepages']
print (" {:30}: {}".format("Total System Memory",
hugepages['memtotal']))
print (" {:30}: {}".format("Total Free Memory",
hugepages['memfree']))
print (" {:30}: {}".format("Actual Huge Page Total",
hugepages['actual_total']))
print (" {:30}: {}".format("Configured Huge Page Total",
hugepages['total']))
print (" {:30}: {}".format("Huge Pages Free", hugepages['free']))
print (" {:30}: {}".format("Huge Page Size", hugepages['size']))
hugepages = node["hugepages"]
print(" {:30}: {}".format("Total System Memory", hugepages["memtotal"]))
print(" {:30}: {}".format("Total Free Memory", hugepages["memfree"]))
print(" {:30}: {}".format("Actual Huge Page Total", hugepages["actual_total"]))
print(" {:30}: {}".format("Configured Huge Page Total", hugepages["total"]))
print(" {:30}: {}".format("Huge Pages Free", hugepages["free"]))
print(" {:30}: {}".format("Huge Page Size", hugepages["size"]))

def get_huge_page_config(self):
"""
@ -115,7 +113,7 @@ class VppHugePageUtil(object):
:returns: The map max count and shmmax
"""

total = self._node['hugepages']['total']
total = self._node["hugepages"]["total"]
max_map_count = int(total) * 2 + 1024
shmmax = int(total) * 2 * 1024 * 1024
return max_map_count, shmmax
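show_huge_pages() above also loses the space in print (...): black removes whitespace between a callable and its argument list, a leftover of the Python 2 print-statement style. Sketch with placeholder values:

    hugepages = {"free": 0, "size": "2048 kB"}

    # Was: print (" {:30}: {}".format(...)) -- note the space.
    print(" {:30}: {}".format("Huge Pages Free", hugepages["free"]))
    print(" {:30}: {}".format("Huge Page Size", hugepages["size"]))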
@ -23,7 +23,7 @@ from vpplib.VPPUtil import VPPUtil
DPDK_SCRIPT = "/vpp/vpp-config/scripts/dpdk-devbind.py"

# PCI Device id regular expresssion
PCI_DEV_ID_REGEX = '[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+.[0-9A-Fa-f]+'
PCI_DEV_ID_REGEX = "[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+.[0-9A-Fa-f]+"


class VppPCIUtil(object):
@ -45,51 +45,47 @@ class VppPCIUtil(object):
devices = {}

ids = re.findall(PCI_DEV_ID_REGEX, device_string)
descriptions = re.findall(r'\'([\s\S]*?)\'', device_string)
unused = re.findall(r'unused=\w+|unused=', device_string)
descriptions = re.findall(r"\'([\s\S]*?)\'", device_string)
unused = re.findall(r"unused=\w+|unused=", device_string)

for i, j in enumerate(ids):
device = {'description': descriptions[i]}
device = {"description": descriptions[i]}
if unused:
device['unused'] = unused[i].split('=')[1].split(',')
device["unused"] = unused[i].split("=")[1].split(",")

cmd = 'ls /sys/bus/pci/devices/{}/driver/module/drivers'. \
format(ids[i])
cmd = "ls /sys/bus/pci/devices/{}/driver/module/drivers".format(ids[i])
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret == 0:
device['driver'] = stdout.split(':')[1].rstrip('\n')
device["driver"] = stdout.split(":")[1].rstrip("\n")

cmd = 'cat /sys/bus/pci/devices/{}/numa_node'.format(ids[i])
cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(ids[i])
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed {} {}'.
format(cmd, stderr, stdout))
numa_node = stdout.rstrip('\n')
if numa_node == '-1':
device['numa_node'] = '0'
raise RuntimeError("{} failed {} {}".format(cmd, stderr, stdout))
numa_node = stdout.rstrip("\n")
if numa_node == "-1":
device["numa_node"] = "0"
else:
device['numa_node'] = numa_node
device["numa_node"] = numa_node

interfaces = []
device['interfaces'] = []
cmd = 'ls /sys/bus/pci/devices/{}/net'.format(ids[i])
device["interfaces"] = []
cmd = "ls /sys/bus/pci/devices/{}/net".format(ids[i])
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret == 0:
interfaces = stdout.rstrip('\n').split()
device['interfaces'] = interfaces
interfaces = stdout.rstrip("\n").split()
device["interfaces"] = interfaces

l2_addrs = []
for intf in interfaces:
cmd = 'cat /sys/bus/pci/devices/{}/net/{}/address'.format(
ids[i], intf)
cmd = "cat /sys/bus/pci/devices/{}/net/{}/address".format(ids[i], intf)
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed {} {}'.
format(cmd, stderr, stdout))
raise RuntimeError("{} failed {} {}".format(cmd, stderr, stdout))

l2_addrs.append(stdout.rstrip('\n'))
l2_addrs.append(stdout.rstrip("\n"))

device['l2addr'] = l2_addrs
device["l2addr"] = l2_addrs

devices[ids[i]] = device

@ -112,66 +108,62 @@ class VppPCIUtil(object):
"""

node = self._node
rootdir = node['rootdir']
rootdir = node["rootdir"]
dpdk_script = rootdir + DPDK_SCRIPT
cmd = dpdk_script + ' --status'
cmd = dpdk_script + " --status"
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {}'.format(
cmd,
node['host'],
stderr))
raise RuntimeError(
"{} failed on node {} {}".format(cmd, node["host"], stderr)
)

# Get the network devices using the DPDK
# First get everything after using DPDK
stda = stdout.split('Network devices using DPDK-compatible driver')[1]
stda = stdout.split("Network devices using DPDK-compatible driver")[1]
# Then get everything before using kernel driver
using_dpdk = stda.split('Network devices using kernel driver')[0]
using_dpdk = stda.split("Network devices using kernel driver")[0]
self._dpdk_devices = self._create_device_list(using_dpdk)

# Get the network devices using the kernel
stda = stdout.split('Network devices using kernel driver')[1]
using_kernel = stda.split('Other network devices')[0]
stda = stdout.split("Network devices using kernel driver")[1]
using_kernel = stda.split("Other network devices")[0]
self._kernel_devices = self._create_device_list(using_kernel)

# Get the other network devices
stda = stdout.split('Other network devices')[1]
other = stda.split('Crypto devices using DPDK-compatible driver')[0]
stda = stdout.split("Other network devices")[1]
other = stda.split("Crypto devices using DPDK-compatible driver")[0]
self._other_devices = self._create_device_list(other)

# Get the crypto devices using the DPDK
stda = stdout.split('Crypto devices using DPDK-compatible driver')[1]
crypto_using_dpdk = stda.split('Crypto devices using kernel driver')[0]
self._crypto_dpdk_devices = self._create_device_list(
crypto_using_dpdk)
stda = stdout.split("Crypto devices using DPDK-compatible driver")[1]
crypto_using_dpdk = stda.split("Crypto devices using kernel driver")[0]
self._crypto_dpdk_devices = self._create_device_list(crypto_using_dpdk)

# Get the network devices using the kernel
stda = stdout.split('Crypto devices using kernel driver')[1]
crypto_using_kernel = stda.split('Other crypto devices')[0]
self._crypto_kernel_devices = self._create_device_list(
crypto_using_kernel)
stda = stdout.split("Crypto devices using kernel driver")[1]
crypto_using_kernel = stda.split("Other crypto devices")[0]
self._crypto_kernel_devices = self._create_device_list(crypto_using_kernel)

# Get the other network devices
crypto_other = stdout.split('Other crypto devices')[1]
crypto_other = stdout.split("Other crypto devices")[1]
self._crypto_other_devices = self._create_device_list(crypto_other)

# Get the devices used by the kernel
for devk in self._kernel_devices.items():
dvid = devk[0]
device = devk[1]
for i in device['interfaces']:
for i in device["interfaces"]:
cmd = "ip addr show " + i
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {}'.format(
cmd,
node['host'],
stderr))
lstate = re.findall(r'state \w+', stdout)[0].split(' ')[1]
raise RuntimeError(
"{} failed on node {} {}".format(cmd, node["host"], stderr)
)
lstate = re.findall(r"state \w+", stdout)[0].split(" ")[1]

# Take care of the links that are UP
if lstate == 'UP':
device['linkup'] = True
if lstate == "UP":
device["linkup"] = True
self._link_up_devices[dvid] = device

for devl in self._link_up_devices.items():
@ -234,18 +226,18 @@ class VppPCIUtil(object):

"""

name = 'port' + str(len(interfaces))
name = "port" + str(len(interfaces))
interfaces[name] = {}
interfaces[name]['pci_address'] = device_id
interfaces[name]['numa_node'] = device['numa_node']
if 'l2addr' in device:
l2_addrs = device['l2addr']
interfaces[name]["pci_address"] = device_id
interfaces[name]["numa_node"] = device["numa_node"]
if "l2addr" in device:
l2_addrs = device["l2addr"]
for i, j in enumerate(l2_addrs):
if i > 0:
mname = 'mac_address' + str(i + 1)
mname = "mac_address" + str(i + 1)
interfaces[name][mname] = l2_addrs[i]
else:
interfaces[name]['mac_address'] = l2_addrs[i]
interfaces[name]["mac_address"] = l2_addrs[i]

@staticmethod
def show_vpp_devices(devices, show_interfaces=True, show_header=True):
@ -261,34 +253,33 @@ class VppPCIUtil(object):
"""

if show_interfaces:
header = "{:15} {:25} {:50}".format("PCI ID",
"Kernel Interface(s)",
"Description")
header = "{:15} {:25} {:50}".format(
"PCI ID", "Kernel Interface(s)", "Description"
)
else:
header = "{:15} {:50}".format("PCI ID",
"Description")
dashseparator = ("-" * (len(header) - 2))
header = "{:15} {:50}".format("PCI ID", "Description")
dashseparator = "-" * (len(header) - 2)

if show_header is True:
print (header)
print (dashseparator)
print(header)
print(dashseparator)
for dit in devices.items():
dvid = dit[0]
device = dit[1]
if show_interfaces:
interfaces = device['interfaces']
interface = ''
interfaces = device["interfaces"]
interface = ""
for i, j in enumerate(interfaces):
if i > 0:
interface += ',' + interfaces[i]
interface += "," + interfaces[i]
else:
interface = interfaces[i]

print ("{:15} {:25} {:50}".format(
dvid, interface, device['description']))
print(
"{:15} {:25} {:50}".format(dvid, interface, device["description"])
)
else:
print ("{:15} {:50}".format(
dvid, device['description']))
print("{:15} {:50}".format(dvid, device["description"]))

@staticmethod
def unbind_vpp_device(node, device_id):
@ -301,14 +292,14 @@ class VppPCIUtil(object):
:type device_id: string
"""

rootdir = node['rootdir']
rootdir = node["rootdir"]
dpdk_script = rootdir + DPDK_SCRIPT
cmd = dpdk_script + ' -u ' + ' ' + device_id
cmd = dpdk_script + " -u " + " " + device_id
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
raise RuntimeError('{} failed on node {} {} {}'.format(
cmd, node['host'],
stdout, stderr))
raise RuntimeError(
"{} failed on node {} {} {}".format(cmd, node["host"], stdout, stderr)
)

@staticmethod
def bind_vpp_device(node, driver, device_id):
@ -324,14 +315,14 @@ class VppPCIUtil(object):
:returns ret: Command return code
"""

rootdir = node['rootdir']
rootdir = node["rootdir"]
dpdk_script = rootdir + DPDK_SCRIPT
cmd = dpdk_script + ' -b ' + driver + ' ' + device_id
cmd = dpdk_script + " -b " + driver + " " + device_id
(ret, stdout, stderr) = VPPUtil.exec_command(cmd)
if ret != 0:
logging.error('{} failed on node {}'.format(
cmd, node['host'], stdout, stderr))
logging.error('{} {}'.format(
stdout, stderr))
logging.error(
"{} failed on node {}".format(cmd, node["host"], stdout, stderr)
)
logging.error("{} {}".format(stdout, stderr))

return ret
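One limit worth noting in the bind_vpp_device() hunk: black re-wraps the logging.error() call but does not touch its arguments, so the pre-existing oddity of passing stdout and stderr to a format string with only two placeholders survives the reformat. str.format() silently ignores the extras; black is a formatter, not a linter. Sketch:

    msg = "{} failed on node {}".format("cmd", "host", "out", "err")
    assert msg == "cmd failed on node host"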
@ -18,31 +18,31 @@ class Constants(object):
"""Constants used in CSIT."""

# OpenVPP testing directory location at topology nodes
REMOTE_FW_DIR = '/tmp/openvpp-testing'
REMOTE_FW_DIR = "/tmp/openvpp-testing"

# shell scripts location
RESOURCES_LIB_SH = 'resources/libraries/bash'
RESOURCES_LIB_SH = "resources/libraries/bash"

# vat templates location
RESOURCES_TPL_VAT = 'resources/templates/vat'
RESOURCES_TPL_VAT = "resources/templates/vat"

# OpenVPP VAT binary name
VAT_BIN_NAME = 'vpp_api_test'
VAT_BIN_NAME = "vpp_api_test"

# QEMU version to install
QEMU_INSTALL_VERSION = 'qemu-2.5.0'
QEMU_INSTALL_VERSION = "qemu-2.5.0"

# QEMU install directory
QEMU_INSTALL_DIR = '/opt/qemu-2.5.0'
QEMU_INSTALL_DIR = "/opt/qemu-2.5.0"

# Honeycomb directory location at topology nodes:
REMOTE_HC_DIR = '/opt/honeycomb'
REMOTE_HC_DIR = "/opt/honeycomb"

# Honeycomb persistence files location
REMOTE_HC_PERSIST = '/var/lib/honeycomb/persist'
REMOTE_HC_PERSIST = "/var/lib/honeycomb/persist"

# Honeycomb templates location
RESOURCES_TPL_HC = 'resources/templates/honeycomb'
RESOURCES_TPL_HC = "resources/templates/honeycomb"

# ODL Client Restconf listener port
ODL_PORT = 8181
Some files were not shown because too many files have changed in this diff