vpp_config: Updates for 20.05

Added the external dependencies package (vpp-ext-deps). Updated the startup.conf template.

Fix extras/scripts/check_commit_msg so it accepts '_' characters in
feature names.

Type: fix

Signed-off-by: John DeNisco <jdenisco@cisco.com>
Change-Id: I69c33a24b30d31e5b8081021030bff88cbaeace9
Signed-off-by: Dave Barach <dave@barachs.net>
John DeNisco
2020-06-19 15:28:48 -04:00
committed by Dave Barach
parent 3a49130c75
commit df35a20f9b
8 changed files with 141 additions and 97 deletions

View File

@@ -2,7 +2,7 @@
KNOWN_FEATURES=$(cat MAINTAINERS | sed -ne 's/^I:[[:space:]]*//p')
FEATURES=$(git show -s --format=%s --no-color \
| sed -ne 's/^\([a-z0-9 -]*\):.*$/\1/p')
| sed -ne 's/^\([a-z0-9_ -]*\):.*$/\1/p')
KNOWN_TYPES="feature fix refactor improvement style docs test make"
TYPE=$(git show -s --format=%b --no-color | sed -ne 's/^Type:[[:space:]]*//p')
ERR="=============================== ERROR ==============================="
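
The one-character change above is the whole fix: the old character class has no '_', so a feature name such as vpp_config is never extracted from the subject line. A rough Python equivalent of the two sed patterns, for illustration only (the script itself uses sed):

import re

subject = 'vpp_config: Updates for 20.05'
print(re.match(r'^([a-z0-9 -]*):', subject))            # None: '_' not in the class
print(re.match(r'^([a-z0-9_ -]*):', subject).group(1))  # vpp_config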

View File

@@ -11,4 +11,3 @@ nodes:
type: DUT
vpp:
startup_config_file: /vpp/vpp-config/dryrun/vpp/startup.conf
unix: {interactive: false}

View File

@@ -1,26 +1,42 @@
unix {{
{unix}
nodaemon
log /var/log/vpp/vpp.log
full-coredump
cli-listen /run/vpp/cli.sock
gid vpp
}}
api-trace {{
## This stanza controls binary API tracing. Unless there is a very strong reason,
## please leave this feature enabled.
on
## Additional parameters:
##
## To set the number of binary API trace records in the circular buffer, configure nitems
##
## nitems <nnn>
##
## To save the api message table decode tables, configure a filename. Results in /tmp/<filename>
## Very handy for understanding api message changes between versions, identifying missing
## plugins, and so forth.
##
## save-api-table <filename>
}}
socksvr {{
default
}}
cpu {{
{cpu}
# scheduler-policy fifo
# scheduler-priority 50
## In the VPP there is one main thread and optionally the user can create worker(s)
## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
## Manual pinning of thread(s) to CPU core(s)
## Set logical CPU core where main thread runs
## Set logical CPU core where main thread runs, if main core is not set
## VPP will use core 1 if available
# main-core 1
## Set logical CPU core(s) where worker threads are running
@@ -50,10 +66,23 @@ cpu {{
# scheduler-priority 50
}}
buffers {{
{buffers}
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per numa node.
## Default is 16384 (8192 if running unprivileged)
# buffers-per-numa 128000
## Size of buffer data area
## Default is 2048
# default data-size 2048
}}
dpdk {{
{devices}
## Change default settings for all interfaces
# dev default {{
## Number of receive queues, enables RSS
## Default is 1
@@ -72,62 +101,90 @@ dpdk {{
## VLAN strip offload mode for interface
## Default is off
# vlan-strip-offload on
## TCP Segment Offload
## Default is off
## To enable TSO, 'enable-tcp-udp-checksum' must be set
# tso on
## Devargs
## device specific init args
## Default is NULL
# devargs safe-mode-support=1,pipeline-mode-support=1
# }}
## Whitelist specific interface by specifying PCI address
# dev 0000:02:00.0
## Blacklist specific device type by specifying PCI vendor:device
## Whitelist entries take precedence
# blacklist 8086:10fb
## Set interface name
# dev 0000:02:00.1 {{
# name eth0
# }}
## Whitelist specific interface by specifying PCI address and in
## addition specify custom parameters for this interface
# dev 0000:02:00.1 {{
# num-rx-queues 2
# }}
## Specify bonded interface and its slaves via PCI addresses
##
## Bonded interface in XOR load balance mode (mode 2) with L3 and L4 headers
# vdev eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:03:00.0,xmit_policy=l34
# vdev eth_bond1,mode=2,slave=0000:02:00.1,slave=0000:03:00.1,xmit_policy=l34
##
## Bonded interface in Active-Backup mode (mode 1)
# vdev eth_bond0,mode=1,slave=0000:02:00.0,slave=0000:03:00.0
# vdev eth_bond1,mode=1,slave=0000:02:00.1,slave=0000:03:00.1
## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci
## and uio_pci_generic (default)
## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci,
## uio_pci_generic or auto (default)
# uio-driver vfio-pci
## Disable multi-segment buffers, improves performance but
## disables Jumbo MTU support
# no-multi-seg
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per CPU socket.
## Default is 16384
# num-mbufs 128000
## Change hugepages allocation per-socket, needed only if there is need for
## larger number of mbufs. Default is 256M on each detected CPU socket
# socket-mem 2048,2048
## Disables UDP / TCP TX checksum offload. Typically needed to use
## faster vector PMDs (together with no-multi-seg)
# no-tx-checksum-offload
## Enable UDP / TCP TX checksum offload
## This is the reversed option of 'no-tx-checksum-offload'
# enable-tcp-udp-checksum
}}
# Adjusting the plugin path depending on where the VPP plugins are:
#plugins
#{{
# path /home/bms/vpp/build-root/install-vpp-native/vpp/lib/vpp_plugins
## node variant defaults
#node {{
## specify the preferred default variant
# default {{ variant avx512 }}
## specify the preferred variant, for a given node
# ip4-rewrite {{ variant avx2 }}
#}}
# Alternate syntax to choose plugin path
#plugin_path /home/bms/vpp/build-root/install-vpp-native/vpp/lib/vpp_plugins
#node-variants {{
# defaults {{
# avx512 100
# }}
# ip4-inacl {{
# avx2 100
# avx512 50
# }}
#}}
# plugins {{
## Adjusting the plugin path depending on where the VPP plugins are
# path /ws/vpp/build-root/install-vpp-native/vpp/lib/vpp_plugins
## Disable all plugins by default and then selectively enable specific plugins
# plugin default {{ disable }}
# plugin dpdk_plugin.so {{ enable }}
# plugin acl_plugin.so {{ enable }}
## Enable all plugins by default and then selectively disable specific plugins
# plugin dpdk_plugin.so {{ disable }}
# plugin acl_plugin.so {{ disable }}
# }}
## Statistics Segment
# statseg {{
# socket-name <filename>, name of the stats segment socket
# defaults to /run/vpp/stats.sock
# size <nnn>[KMG], size of the stats segment, defaults to 32mb
# per-node-counters on | off, defaults to none
# update-interval <f64-seconds>, sets the segment scrape / update interval
# }}
{tcp}
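
A note on this template's doubled braces: auto_config.py renders the file with Python's str.format() (the stdout.format(...) call later in this commit), so '{{' and '}}' become literal braces in the generated startup.conf, while {cpu}, {buffers}, {devices} and {tcp} are substitution fields; the {unix} field is dropped by this commit in favour of the hard-coded unix stanza above. A minimal rendering sketch with illustrative values only:

# Sketch only; the real template is read from the node's startup_config_file.
template = "cpu {{\n{cpu}\n}}\nbuffers {{\n{buffers}\n}}\n"
print(template.format(cpu="  main-core 1", buffers="  buffers-per-numa 16384"))
# cpu {
#   main-core 1
# }
# buffers {
#   buffers-per-numa 16384
# }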

View File

@@ -59,7 +59,7 @@ case "$LSB" in
;;
esac
sudo -H pip uninstall vpp-config
sudo -H pip3 uninstall vpp-config
sudo rm *~
sudo rm *.pyc
sudo rm vpplib/*~

View File

@@ -0,0 +1,8 @@
certifi==2020.4.5.1
chardet==3.0.4
distro==1.5.0
idna==2.9
pkg-resources==0.0.0
PyYAML==5.3.1
requests==2.23.0
urllib3==1.25.9

View File

@@ -1,7 +1,7 @@
from setuptools import setup
setup(name="vpp_config",
version="19.04.2",
version="20.05.1",
author="John DeNisco",
author_email="jdenisco@cisco.com",
description="VPP Configuration Utility",

View File

@@ -315,27 +315,6 @@ class AutoConfig(object):
hpg = VppHugePageUtil(node)
hpg.hugepages_dryrun_apply()
@staticmethod
def _apply_vpp_unix(node):
"""
Apply the VPP Unix config
:param node: Node dictionary with cpuinfo.
:type node: dict
"""
unix = ' nodaemon\n'
if 'unix' not in node['vpp']:
return ''
unixv = node['vpp']['unix']
if 'interactive' in unixv:
interactive = unixv['interactive']
if interactive is True:
unix = ' interactive\n'
return unix.rstrip('\n')
@staticmethod
def _apply_vpp_cpu(node):
"""
@@ -382,7 +361,6 @@ class AutoConfig(object):
devices = ''
ports_per_numa = node['cpu']['ports_per_numa']
total_mbufs = node['cpu']['total_mbufs']
for item in ports_per_numa.items():
value = item[1]
@@ -417,12 +395,25 @@ class AutoConfig(object):
devices += ' num-tx-desc {}\n'.format(num_tx_desc)
devices += ' }'
return devices
@staticmethod
def _apply_buffers(node):
"""
Apply the VPP buffers configuration to vpp startup.
:param node: Node dictionary with cpuinfo.
:type node: dict
"""
buffers = ''
total_mbufs = node['cpu']['total_mbufs']
# If total mbufs is non-zero and greater than the default (16384), set buffers-per-numa
logging.debug("Total mbufs: {}".format(total_mbufs))
if total_mbufs != 0 and total_mbufs > 16384:
devices += '\n num-mbufs {}'.format(total_mbufs)
buffers += ' buffers-per-numa {}'.format(total_mbufs)
return devices
return buffers
@staticmethod
def _calc_vpp_workers(node, vpp_workers, numa_node, other_cpus_end,
@@ -504,9 +495,7 @@ class AutoConfig(object):
# Get the descriptor entries
desc_entries = 1024
ports_per_numa_value['rx_queues'] = rx_queues
total_mbufs = (((rx_queues * desc_entries) +
(tx_queues * desc_entries)) *
total_ports_per_numa)
total_mbufs = ((rx_queues * desc_entries) + (tx_queues * desc_entries)) * total_ports_per_numa
return total_mbufs
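
To make the calculation above concrete (desc_entries is the 1024 set in this hunk; the queue and port counts below are assumptions for illustration only):

desc_entries = 1024
rx_queues, tx_queues = 2, 2          # assumed for illustration
total_ports_per_numa = 2             # assumed for illustration
total_mbufs = ((rx_queues * desc_entries) +
               (tx_queues * desc_entries)) * total_ports_per_numa
print(total_mbufs)  # 8192 -- below the 16384 threshold in _apply_buffers,
                    # so no buffers-per-numa line is emitted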
@@ -554,8 +543,7 @@ class AutoConfig(object):
# Get the number of cpus to skip, we never use the first cpu
other_cpus_start = 1
other_cpus_end = other_cpus_start + \
node['cpu']['total_other_cpus'] - 1
other_cpus_end = other_cpus_start + node['cpu']['total_other_cpus'] - 1
other_workers = None
if other_cpus_end != 0:
other_workers = (other_cpus_start, other_cpus_end)
@@ -608,7 +596,7 @@ class AutoConfig(object):
@staticmethod
def _apply_vpp_tcp(node):
"""
Apply the VPP Unix config
Apply the tcp config
:param node: Node dictionary with cpuinfo.
:type node: dict
@@ -677,15 +665,14 @@ class AutoConfig(object):
rootdir = node['rootdir']
sfile = rootdir + node['vpp']['startup_config_file']
# Get the devices
# Get the buffers
devices = self._apply_vpp_devices(node)
# Get the CPU config
cpu = self._apply_vpp_cpu(node)
# Get the unix config
unix = self._apply_vpp_unix(node)
# Get the buffer configuration
buffers = self._apply_buffers(node)
# Get the TCP configuration, if any
tcp = self._apply_vpp_tcp(node)
@@ -699,8 +686,8 @@ class AutoConfig(object):
if ret != 0:
raise RuntimeError('Executing cat command failed to node {}'.
format(node['host']))
startup = stdout.format(unix=unix,
cpu=cpu,
startup = stdout.format(cpu=cpu,
buffers=buffers,
devices=devices,
tcp=tcp)
@@ -966,8 +953,7 @@ class AutoConfig(object):
max_other_cores = total_cpus - total_vpp_cpus
if max_other_cores > 0:
question = 'How many core(s) do you want to reserve for ' \
'processes other than VPP? [0-{}][0]? '. \
format(str(max_other_cores))
'processes other than VPP? [0-{}][0]? '.format(str(max_other_cores))
total_other_cpus = self._ask_user_range(question, 0, max_other_cores, 0)
node['cpu']['total_other_cpus'] = total_other_cpus
@@ -1198,8 +1184,7 @@ class AutoConfig(object):
dvid = dit[0]
device = dit[1]
if 'unused' in device and len(
device['unused']) != 0 and device['unused'][
0] != '':
device['unused']) != 0 and device['unused'][0] != '':
driver = device['unused'][0]
question = "Would you like to bind the driver {} for {} [y/N]? ".format(driver, dvid)
answer = self._ask_user_yn(question, 'n')
@@ -1233,8 +1218,7 @@ class AutoConfig(object):
dvid = dit[0]
device = dit[1]
if 'unused' in device and len(
device['unused']) != 0 and device['unused'][
0] != '':
device['unused']) != 0 and device['unused'][0] != '':
driver = device['unused'][0]
logging.debug(
'Binding device {} to driver {}'.format(
@@ -1274,8 +1258,7 @@ class AutoConfig(object):
hugesize = int(size.split(' ')[0])
# The max number of huge pages should be no more than
# 70% of total free memory
maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // \
hugesize
maxpages = (int(memfree) * MAX_PERCENT_FOR_HUGE_PAGES // 100) // hugesize
print("\nThere currently {} {} huge pages free.".format(
free, size))
question = "Do you want to reconfigure the number of " \
@@ -1793,13 +1776,11 @@ class AutoConfig(object):
setintupstr = 'set interface state {} up\n'.format(
intf['name'])
content += vhoststr + setintdnstr + setintbrstr + \
setvintbrstr + setintvststr + setintupstr
content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
# Write the content to the script
rootdir = node['rootdir']
filename = rootdir + \
'/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
filename = rootdir + '/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp'
with open(filename, 'w+') as sfile:
sfile.write(content)
@@ -1934,8 +1915,7 @@ class AutoConfig(object):
setintupstr = 'set interface state {} up\n'.format(
intf['name'])
content += vhoststr + setintdnstr + setintbrstr + \
setvintbrstr + setintvststr + setintupstr
content += vhoststr + setintdnstr + setintbrstr + setvintbrstr + setintvststr + setintupstr
# Write the content to the script
rootdir = node['rootdir']

View File

@@ -24,14 +24,14 @@ from collections import Counter
import distro
ubuntu_pkgs = {'release': ['vpp', 'vpp-plugin-core', 'vpp-plugin-dpdk', 'vpp-api-python', 'python3-vpp-api',
'vpp-dbg', 'vpp-dev'],
'vpp-dbg', 'vpp-dev', 'vpp-ext-deps'],
'master': ['vpp', 'vpp-plugin-core', 'vpp-plugin-dpdk', 'vpp-api-python', 'python3-vpp-api',
'vpp-dbg', 'vpp-dev']}
'vpp-dbg', 'vpp-dev', 'vpp-ext-deps']}
centos_pkgs = {'release': ['vpp', 'vpp-selinux-policy', 'vpp-plugins', 'vpp-api-lua',
'vpp-api-python', 'vpp-debuginfo', 'vpp-devel', 'libvpp0'],
'vpp-api-python', 'vpp-debuginfo', 'vpp-devel', 'libvpp0', 'vpp-ext-deps'],
'master': ['vpp', 'vpp-selinux-policy', 'vpp-plugins', 'vpp-api-lua',
'vpp-api-python', 'vpp-debuginfo', 'vpp-devel', 'libvpp0']}
'vpp-api-python', 'vpp-debuginfo', 'vpp-devel', 'libvpp0', 'vpp-ext-deps']}
class VPPUtil(object):