docs: add VPP Container Testbench example and lab

Adding a "VPP container testbench" (pair of Docker containers plus
helper scripts to test Linux and VPP interfaces). Will be part of a
larger set of labs/exercises/tutorials. Putting this baseline setup up
for review first to see if the community sees use/value in it. If so,
additional exercises using the testbench will be added gradually.

Type: improvement
Signed-off-by: Matthew Giassa <mgiassa@cisco.com>
Change-Id: I582310f7355419e907d575f640482ca49cbb282f
This commit is contained in:
Matthew Giassa
2021-11-19 17:06:11 +00:00
committed by Dave Wallace
parent 342a5d472f
commit 4a0dd383cf
9 changed files with 1280 additions and 0 deletions

View File

@ -0,0 +1,82 @@
#------------------------------------------------------------------------------#
# @brief: Dockerfile for building the VPP testbench project.
# @author: Matthew Giassa <mgiassa@cisco.com>
# @copyright: (C) Cisco 2021.
#------------------------------------------------------------------------------#
# Baseline image both client and server inherit from.
FROM ubuntu:focal as baseline
# System packages.
# "apt-get" is used rather than "apt": apt's CLI is not stable for scripted
# use and warns in non-interactive contexts. tzdata is installed first with
# DEBIAN_FRONTEND=noninteractive so the termshark dependency chain cannot
# hang the build on an interactive timezone prompt.
RUN apt-get update -y && \
    DEBIAN_FRONTEND="noninteractive" apt-get install -y tzdata termshark && \
    apt-get install -y \
        apt-transport-https \
        axel \
        bash \
        binutils \
        bridge-utils \
        ca-certificates \
        coreutils \
        curl \
        gnupg \
        htop \
        iftop \
        iproute2 \
        iptables \
        iputils-ping \
        netcat \
        net-tools \
        nload \
        nmap \
        procps \
        python3 \
        python3-dev \
        python3-pip \
        sudo \
        wget \
        tcpdump \
        vim \
    && \
    apt-get clean -y
# Python packages.
RUN python3 -m pip install \
    scapy
# VPP.
# BUGFIX: the signing key was previously fetched from the "fdio/master"
# packagecloud repo while the apt source configured below points at
# "fdio/release"; fetch the key from the same repo that is configured.
# ("[trusted=yes]" bypasses signature verification regardless, but the key
# should at least be consistent with the repository it vouches for.)
RUN bash -c "curl -L https://packagecloud.io/fdio/release/gpgkey | apt-key add -" && \
    bash -c "echo \"deb [trusted=yes] https://packagecloud.io/fdio/release/ubuntu focal main\" >> /etc/apt/sources.list.d/99fd.io.list" && \
    apt-get update && \
    apt-get install -y \
        vpp \
        vpp-plugin-core \
        vpp-plugin-dpdk \
    && \
    apt-get clean -y
# Used by client/server entrypoint scripts. COPY (not ADD) is preferred for
# plain local files (no URL-fetch or auto-extract semantics needed).
COPY vpp_testbench_helpers.sh /
#------------------------------------------------------------------------------#
# Client image.
FROM baseline as client_img
# Enable a health probe. The port is injected at build time by the Makefile
# (DOCKER_HEALTH_PROBE_PORT) and re-exported as an ENV for runtime use.
ARG HEALTHCHECK_PORT=8080
ENV HEALTHCHECK_PORT_RUNTIME="${HEALTHCHECK_PORT}"
HEALTHCHECK CMD curl --fail "http://localhost:$HEALTHCHECK_PORT_RUNTIME" || exit 1
# Image-specific overrides.
COPY ./entrypoint_client.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
#------------------------------------------------------------------------------#
# Server image.
FROM baseline as server_img
# Enable a health probe (same mechanism as the client image).
ARG HEALTHCHECK_PORT=8080
ENV HEALTHCHECK_PORT_RUNTIME="${HEALTHCHECK_PORT}"
HEALTHCHECK CMD curl --fail "http://localhost:$HEALTHCHECK_PORT_RUNTIME" || exit 1
# Image-specific overrides.
COPY ./entrypoint_server.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

View File

@ -0,0 +1,14 @@
#------------------------------------------------------------------------------#
# @brief: Dockerfile permit/deny-list for building the VPP testbench
# project.
# @author: Matthew Giassa <mgiassa@cisco.com>
# @copyright: (C) Cisco 2021.
#------------------------------------------------------------------------------#
# NOTE(review): this appears to be a per-Dockerfile ignore file
# ("Dockerfile.<name>.dockerignore" — see the Makefile's "docker" target
# prerequisites). That naming scheme is only honored by BuildKit builds
# (the Makefile sets DOCKER_BUILDKIT=1) — confirm if builds are ever run
# without BuildKit.
# Ignore everything by default. Permit-list only.
*
# Entrypoint scripts and other artifacts.
!entrypoint_client.sh
!entrypoint_server.sh
!vpp_testbench_helpers.sh

View File

@ -0,0 +1,147 @@
################################################################################
# @brief: Makefile for building the VPP testbench example.
# @author: Matthew Giassa.
# @copyright: (C) Cisco 2021.
################################################################################
#------------------------------------------------------------------------------#
# Constants and settings.
#------------------------------------------------------------------------------#
# Recipes rely on bash (helper functions are sourced via ". file").
SHELL := /bin/bash
# BUGFIX: ".DEFAULT_GOAL" is a special make *variable*, not a target. The
# previous ".DEFAULT_GOAL: all" spelling declared a useless target named
# ".DEFAULT_GOAL"; it only appeared to work because "all" also happens to be
# the first rule in the file. Assign the variable explicitly instead.
.DEFAULT_GOAL := all
# Image names.
# TODO: semver2 format if we want to publish these to a registry.
DOCKER_CLIENT_IMG := vpp-testbench-client
DOCKER_CLIENT_REL := local
DOCKER_CLIENT_IMG_FULL := $(DOCKER_CLIENT_IMG):$(DOCKER_CLIENT_REL)
DOCKER_SERVER_IMG := vpp-testbench-server
DOCKER_SERVER_REL := local
DOCKER_SERVER_IMG_FULL := $(DOCKER_SERVER_IMG):$(DOCKER_SERVER_REL)
# Docker build-time settings (and run-time settings as well). Resolved once
# at parse time (":=") by sourcing the shared helper script, so the Makefile,
# the Dockerfile and the containers all agree on the health-probe port (DRY).
DOCKER_HEALTH_PROBE_PORT := $(shell bash -c ". vpp_testbench_helpers.sh; host_only_get_docker_health_probe_port")
#------------------------------------------------------------------------------#
# Functions.
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Cleanup running containers, Docker networks, etc.; from previous runs.
# NOTE: "#" lines inside the define expand into recipe lines and are passed
# to (and ignored by) the shell.
# NOTE(review): the kill helpers ignore their image-name argument (they key
# off the fixed container names in vpp_testbench_helpers.sh); the arguments
# are kept for symmetry with the launch helpers — confirm.
define cleanup_everything
# Terminate the containers.
bash -c "\
	. vpp_testbench_helpers.sh; \
	host_only_kill_testbench_client_container $(DOCKER_CLIENT_IMG_FULL); \
	host_only_kill_testbench_server_container $(DOCKER_SERVER_IMG_FULL); \
	"
# Cleanup Docker bridge network.
bash -c "\
	. vpp_testbench_helpers.sh; \
	host_only_destroy_docker_networks; \
	"
endef
#------------------------------------------------------------------------------#
# Launch our containers and connect them to a private Docker network for
# testing.
define launch_testbench
# Create Docker bridge network.
bash -c "\
	. vpp_testbench_helpers.sh; \
	host_only_create_docker_networks; \
	"
# Launch the containers.
bash -c "\
	. vpp_testbench_helpers.sh; \
	host_only_run_testbench_client_container $(DOCKER_CLIENT_IMG_FULL); \
	host_only_run_testbench_server_container $(DOCKER_SERVER_IMG_FULL); \
	"
# Entrypoint scripts will bring up the various links.
# Use "docker ps" to check status of containers, see if their health
# probes are working as expected (i.e. "health"), etc.
endef
#------------------------------------------------------------------------------#
# Goals.
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Default goal: build the container images.
.PHONY: all
all: docker
	@echo Done.
#------------------------------------------------------------------------------#
# Build all docker images. Phony, but lists the build inputs as
# prerequisites so make fails fast if any required file is missing.
.PHONY: docker
docker: Dockerfile.vpp_testbench Dockerfile.vpp_testbench.dockerignore \
	entrypoint_client.sh entrypoint_server.sh \
	vpp_testbench_helpers.sh
	# Client image.
	DOCKER_BUILDKIT=1 docker build \
		--file Dockerfile.vpp_testbench \
		--build-arg HEALTHCHECK_PORT=$(DOCKER_HEALTH_PROBE_PORT) \
		--tag $(DOCKER_CLIENT_IMG_FULL) \
		--target client_img \
		.
	# Server image.
	DOCKER_BUILDKIT=1 docker build \
		--file Dockerfile.vpp_testbench \
		--build-arg HEALTHCHECK_PORT=$(DOCKER_HEALTH_PROBE_PORT) \
		--tag $(DOCKER_SERVER_IMG_FULL) \
		--target server_img \
		.
#------------------------------------------------------------------------------#
# Execute end-to-end test via containers.
# NOTE(review): currently this only verifies the testbench can be brought up
# and torn down; traffic-level exercises are expected to be added later.
.PHONY: test
test:
	# Cleanup anything from previous runs.
	$(call cleanup_everything)
	# Launch our testbench.
	$(call launch_testbench)
	# Final cleanup.
	$(call cleanup_everything)
#------------------------------------------------------------------------------#
# For manually cleaning up a test that fails partway through its execution.
.PHONY: clean
clean:
	$(call cleanup_everything)
#------------------------------------------------------------------------------#
# For manually launching our testbench for interactive testing.
.PHONY: start
start:
	$(call launch_testbench)
#------------------------------------------------------------------------------#
# For manually stopping (and cleaning up) our testbench.
.PHONY: stop
stop:
	$(call cleanup_everything)
#------------------------------------------------------------------------------#
# Create an interactive shell session connected to the client container (for
# manual testing). Typically preceded by "make start", and concluded with
# "make stop" after exiting the shell.
.PHONY: shell_client
shell_client:
	bash -c "\
		. vpp_testbench_helpers.sh; \
		host_only_shell_client_container; \
		"
#------------------------------------------------------------------------------#
# Create an interactive shell session connected to the server container (for
# manual testing). Typically preceded by "make start", and concluded with
# "make stop" after exiting the shell.
.PHONY: shell_server
shell_server:
	bash -c "\
		. vpp_testbench_helpers.sh; \
		host_only_shell_server_container; \
		"

View File

@ -0,0 +1,150 @@
#!/bin/bash
################################################################################
# @brief: Launcher/entrypoint script plus helper functions for "client
# side" container in the VPP testbench.
# @author: Matthew Giassa <mgiassa@cisco.com>
# @copyright: (C) Cisco 2021.
################################################################################
################################################################################
# Dependencies.
################################################################################
# Import common settings for server and client. This is supplied via the
# Dockerfile build.
# shellcheck disable=SC1091
. vpp_testbench_helpers.sh
################################################################################
# Globals.
################################################################################
# VPP instance socket. Client-specific filename so it cannot collide with the
# server's socket on the shared /run/vpp volume mount.
export VPP_SOCK=/run/vpp/vpp.testbench-client.sock
# Alias for vppctl that uses the correct socket name.
export VPPCTL="vppctl -s ${VPP_SOCK}"
# Our "Docker bridge network" interface inside the container. Don't change
# this value.
export NET_IF_DOCKER="eth0"
# Name of link associated with our VXLAN. VXLAN_ID_LINUX is exported by
# vpp_testbench_helpers.sh (sourced above).
export LINK_VXLAN_LINUX="vxlan-vid-${VXLAN_ID_LINUX}"
################################################################################
# Function definitions.
################################################################################
#------------------------------------------------------------------------------#
# @brief: Convenience wrapper for vppctl that always targets this container's
#         VPP API socket (function form of the exported VPPCTL alias).
function vc()
{
    local -a base_cmd=(vppctl -s "${VPP_SOCK}")
    "${base_cmd[@]}" "$@"
}
#------------------------------------------------------------------------------#
# @brief: Used to initialize/configure the client container once it's up and
# running: builds the Linux-stack VXLAN overlay over the Docker bridge
# network, then starts VPP and wires a memif + tap together in one L2
# bridge domain. All addresses/IDs come from vpp_testbench_helpers.sh.
function context_create()
{
    # Trace commands to aid debugging via "docker logs".
    set -x
    echo "Running client. Host: $(hostname)"
    local mtu
    # Setup VXLAN overlay (multicast group derived from the VXLAN ID; TTL 1
    # keeps the multicast traffic on the local bridge segment).
    ip link add "${LINK_VXLAN_LINUX}" \
        type vxlan \
        id "${VXLAN_ID_LINUX}" \
        dstport "${VXLAN_PORT}" \
        local "${CLIENT_BRIDGE_IP_DOCKER}" \
        group "${MC_VXLAN_ADDR_LINUX}" \
        dev "${NET_IF_DOCKER}" \
        ttl 1
    ip link set "${LINK_VXLAN_LINUX}" up
    ip addr add "${CLIENT_VXLAN_IP_LINUX}/${MASK_VXLAN_LINUX}" dev "${LINK_VXLAN_LINUX}"
    # Get MTU of interface. VXLAN must use a smaller value due to overhead.
    mtu="$(cat /sys/class/net/${NET_IF_DOCKER}/mtu)"
    # Decrease VXLAN MTU. This should already be handled for us by iproute2, but
    # just being cautious.
    ip link set dev "${LINK_VXLAN_LINUX}" mtu "$((mtu - 50))"
    # Bring-up VPP and create tap interfaces and VXLAN tunnel. ("no-pci":
    # no physical NICs are passed into the container.)
    vpp \
        unix '{' log /tmp/vpp1.log full-coredump cli-listen ${VPP_SOCK} '}' \
        api-segment '{' prefix vpp1 '}' \
        api-trace '{' on '}' \
        dpdk '{' uio-driver uio_pci_generic no-pci '}'
    # Wait for VPP to come up (poll until the CLI socket accepts commands).
    while ! ${VPPCTL} show log; do
        sleep 1
    done
    # Bring up the memif interface and assign an IP to it. The client side is
    # the memif "slave"; the server entrypoint creates the "master".
    ${VPPCTL} create interface memif id 0 slave
    sleep 1
    ${VPPCTL} set int state memif0/0 up
    ${VPPCTL} set int ip address memif0/0 "${CLIENT_VPP_MEMIF_IP}/${VPP_MEMIF_NM}"
    # Create VPP-controlled tap interface bridged to the memif.
    ${VPPCTL} create tap id 0 host-if-name vpp-tap-0
    sleep 1
    ${VPPCTL} set interface state tap0 up
    ip addr add "${CLIENT_VPP_TAP_IP_MEMIF}/${VPP_TAP_NM}" dev vpp-tap-0
    # Place the tap and the memif in the same L2 bridge domain so traffic
    # flows between the Linux tap endpoint and the memif.
    ${VPPCTL} set interface l2 bridge tap0 "${VPP_BRIDGE_DOMAIN_TAP}"
    ${VPPCTL} set interface l2 bridge memif0/0 "${VPP_BRIDGE_DOMAIN_TAP}"
}
#------------------------------------------------------------------------------#
# @brief: Used to shutdown/cleanup the client container.
function context_destroy()
{
    # Deliberate no-op: the OS reclaims all interfaces and resources when
    # the container is terminated.
    return 0
}
#------------------------------------------------------------------------------#
# @brief: Client worker loop to keep the container alive. Just idles.
#         ("tail -f /dev/null" blocks forever without busy-waiting, keeping
#         the entrypoint process running so Docker keeps the container up.)
function context_loop()
{
    # Sleep indefinitely (to keep container alive for testing).
    tail -f /dev/null
}
#------------------------------------------------------------------------------#
# @brief: Launches a minimalistic web server via netcat. The Dockerfile
# associated with this project is configured to treat the web server
# replying with "200 OK" as a sort of simple health probe.
function health_check_init()
{
    # nc serves one connection per invocation ("-q 1" quits one second
    # after EOF), so loop forever to keep answering probes.
    while true; do
        # NOTE(review): header lines use bare "\n" rather than CRLF; curl
        # (the HEALTHCHECK client) tolerates this — confirm before pointing
        # stricter HTTP clients at it.
        echo -e "HTTP/1.1 200 OK\n\nHOST:$(hostname)\nDATE:$(date)" \
            | nc -l -p "${DOCKER_HEALTH_PROBE_PORT}" -q 1
    done
}
#------------------------------------------------------------------------------#
# @brief: Main/default entry point (used when this script is executed as the
#         container ENTRYPOINT rather than sourced).
function main()
{
    # Make sure we always cleanup.
    trap context_destroy EXIT
    # Bring up interfaces.
    context_create
    # Enable health check responder (backgrounded; answers the Docker
    # HEALTHCHECK curl probe).
    health_check_init &
    # Enter our worker loop (blocks forever).
    context_loop
}
#------------------------------------------------------------------------------#
# Script is generally intended to be sourced and individual functions called.
# If just run as a standalone script, assume it's being used as the entrypoint
# for a Docker container. ("-ef" is true when both operands name the same
# file, i.e. this script itself was what got executed.)
if [ "${BASH_SOURCE[0]}" -ef "$0" ]; then
    # Being run. Launch main.
    main "${@}"
else
    # Being sourced. Do nothing (just expose the functions/variables).
    :
fi

View File

@ -0,0 +1,176 @@
#!/bin/bash
################################################################################
# @brief: Launcher/entrypoint script plus helper functions for "server
# side" container in the VPP testbench.
# @author: Matthew Giassa <mgiassa@cisco.com>
# @copyright: (C) Cisco 2021.
################################################################################
################################################################################
# Dependencies.
################################################################################
# Import common settings for server and client. This is supplied via the
# Dockerfile build.
# shellcheck disable=SC1091
. vpp_testbench_helpers.sh
################################################################################
# Globals.
################################################################################
# VPP instance socket. Server-specific filename so it cannot collide with the
# client's socket on the shared /run/vpp volume mount.
export VPP_SOCK=/run/vpp/vpp.testbench-server.sock
# Alias for vppctl that uses the correct socket name.
export VPPCTL="vppctl -s ${VPP_SOCK}"
# Our "Docker bridge network" interface inside the container. Don't change
# this value.
export NET_IF_DOCKER="eth0"
# Name of link associated with our VXLAN. VXLAN_ID_LINUX is exported by
# vpp_testbench_helpers.sh (sourced above).
export LINK_VXLAN_LINUX="vxlan-vid-${VXLAN_ID_LINUX}"
################################################################################
# Function definitions.
################################################################################
#------------------------------------------------------------------------------#
# @brief: Convenience wrapper for vppctl that always targets this container's
#         VPP API socket (function form of the exported VPPCTL alias).
function vc()
{
    local -a base_cmd=(vppctl -s "${VPP_SOCK}")
    "${base_cmd[@]}" "$@"
}
#------------------------------------------------------------------------------#
# @brief: Used to initialize/configure the server container once it's up and
# running: builds the Linux-stack VXLAN overlay over the Docker bridge
# network, then starts VPP and wires a memif + tap together in one L2
# bridge domain. All addresses/IDs come from vpp_testbench_helpers.sh.
function context_create()
{
    # Trace commands to aid debugging via "docker logs".
    set -x
    echo "Running server. Host: $(hostname)"
    local mtu
    # Setup VXLAN overlay (multicast group derived from the VXLAN ID; TTL 1
    # keeps the multicast traffic on the local bridge segment).
    ip link add "${LINK_VXLAN_LINUX}" \
        type vxlan \
        id "${VXLAN_ID_LINUX}" \
        dstport "${VXLAN_PORT}" \
        local "${SERVER_BRIDGE_IP_DOCKER}" \
        group "${MC_VXLAN_ADDR_LINUX}" \
        dev "${NET_IF_DOCKER}" \
        ttl 1
    ip link set "${LINK_VXLAN_LINUX}" up
    ip addr add "${SERVER_VXLAN_IP_LINUX}/${MASK_VXLAN_LINUX}" dev "${LINK_VXLAN_LINUX}"
    # Get MTU of interface. VXLAN must use a smaller value due to overhead.
    mtu="$(cat /sys/class/net/${NET_IF_DOCKER}/mtu)"
    # Decrease VXLAN MTU. This should already be handled for us by iproute2, but
    # just being cautious.
    ip link set dev "${LINK_VXLAN_LINUX}" mtu "$((mtu - 50))"
    # Bring-up VPP and create tap interfaces and VXLAN tunnel. ("no-pci":
    # no physical NICs are passed into the container.)
    vpp \
        unix '{' log /tmp/vpp1.log full-coredump cli-listen ${VPP_SOCK} '}' \
        api-segment '{' prefix vpp1 '}' \
        api-trace '{' on '}' \
        dpdk '{' uio-driver uio_pci_generic no-pci '}'
    # Wait for VPP to come up (poll until the CLI socket accepts commands).
    while ! ${VPPCTL} show log; do
        sleep 1
    done
    # Bring up the memif interface and assign an IP to it. The server side is
    # the memif "master"; the client entrypoint creates the "slave".
    ${VPPCTL} create interface memif id 0 master
    sleep 1
    ${VPPCTL} set int state memif0/0 up
    ${VPPCTL} set int ip address memif0/0 "${SERVER_VPP_MEMIF_IP}/${VPP_MEMIF_NM}"
    # Create VPP-controlled tap interface bridged to the memif.
    ${VPPCTL} create tap id 0 host-if-name vpp-tap-0
    sleep 1
    ${VPPCTL} set interface state tap0 up
    ip addr add "${SERVER_VPP_TAP_IP_MEMIF}/${VPP_TAP_NM}" dev vpp-tap-0
    # Place the tap and the memif in the same L2 bridge domain so traffic
    # flows between the Linux tap endpoint and the memif.
    ${VPPCTL} set interface l2 bridge tap0 "${VPP_BRIDGE_DOMAIN_TAP}"
    ${VPPCTL} set interface l2 bridge memif0/0 "${VPP_BRIDGE_DOMAIN_TAP}"
}
#------------------------------------------------------------------------------#
# @brief: Used to shutdown/cleanup the server container.
function context_destroy()
{
    # Deliberate no-op: the OS reclaims all interfaces and resources when
    # the container is terminated.
    return 0
}
#------------------------------------------------------------------------------#
# @brief: Server worker loop to keep the container alive. Just idles.
#         ("tail -f /dev/null" blocks forever without busy-waiting, keeping
#         the entrypoint process running so Docker keeps the container up.)
function context_loop()
{
    # Sleep indefinitely (to keep container alive for testing).
    tail -f /dev/null
}
#------------------------------------------------------------------------------#
# @brief: Launches a minimalistic web server via netcat. The Dockerfile
# associated with this project is configured to treat the web server
# replying with "200 OK" as a sort of simple health probe.
function health_check_init()
{
    # nc serves one connection per invocation ("-q 1" quits one second
    # after EOF), so loop forever to keep answering probes.
    while true; do
        # NOTE(review): header lines use bare "\n" rather than CRLF; curl
        # (the HEALTHCHECK client) tolerates this — confirm before pointing
        # stricter HTTP clients at it.
        echo -e "HTTP/1.1 200 OK\n\nHOST:$(hostname)\nDATE:$(date)" \
            | nc -l -p "${DOCKER_HEALTH_PROBE_PORT}" -q 1
    done
}
#------------------------------------------------------------------------------#
# @brief: Launches a minimalistic web server via netcat. This instance is
# meant to bind to the Linux VXLAN tunnel we create ("-s" binds the
# listener to the server's Linux-stack VXLAN address, port 8000).
function web_server_vxlan_linux()
{
    # One connection per nc invocation; loop forever to keep serving.
    while true; do
        echo -e "HTTP/1.1 200 OK\n\nHOST:$(hostname)\nDATE:$(date)\nHello from the Linux interface." \
            | nc -l -s "${SERVER_VXLAN_IP_LINUX}" -p 8000 -q 1
    done
}
#------------------------------------------------------------------------------#
# @brief: Launches a minimalistic web server via netcat. This instance is
# meant to bind to the VPP VXLAN tunnel we create ("-s" binds the
# listener to the server's VPP tap address, port 8000).
function web_server_vpp_tap()
{
    # One connection per nc invocation; loop forever to keep serving.
    while true; do
        echo -e "HTTP/1.1 200 OK\n\nHOST:$(hostname)\nDATE:$(date)\nHello from the VPP interface." \
            | nc -l -s "${SERVER_VPP_TAP_IP_MEMIF}" -p 8000 -q 1
    done
}
#------------------------------------------------------------------------------#
# @brief: Main/default entry point (used when this script is executed as the
#         container ENTRYPOINT rather than sourced).
function main()
{
    # Make sure we always cleanup.
    trap context_destroy EXIT
    # Bring up interfaces.
    context_create
    # Enable health check responder (backgrounded; answers the Docker
    # HEALTHCHECK curl probe).
    health_check_init &
    # Bring up test web servers: one bound to the Linux-stack VXLAN address,
    # one to the VPP tap address, so the client can probe both data paths.
    web_server_vxlan_linux &
    web_server_vpp_tap &
    # Enter our worker loop (blocks forever).
    context_loop
}
#------------------------------------------------------------------------------#
# Script is generally intended to be sourced and individual functions called.
# If just run as a standalone script, assume it's being used as the entrypoint
# for a Docker container. ("-ef" is true when both operands name the same
# file, i.e. this script itself was what got executed.)
if [ "${BASH_SOURCE[0]}" -ef "$0" ]; then
    # Being run. Launch main.
    main "${@}"
else
    # Being sourced. Do nothing (just expose the functions/variables).
    :
fi

View File

@ -0,0 +1,273 @@
#!/bin/bash
################################################################################
# @brief: Helper functions for the VPP testbench project.
# NOTE: functions prefixed with "host_only" are functions
# intended to be executed on the host OS, **outside** of the
# Docker containers. These are typically functions for bring-up
# (i.e. creating the Docker networks, launching/terminating the
# Docker containers, etc.). If a function is not prefixed with
# "host_only", assume that the function/value/etc. is intended
# for use within the Docker containers. We could maybe re-factor
# this in the future so "host_only" functions live in a separate
# file.
# @author: Matthew Giassa <mgiassa@cisco.com>
# @copyright: (C) Cisco 2021.
################################################################################
# Meant to be sourced, not executed directly. ("-ef" is true when both
# operands name the same file, i.e. this file was executed rather than
# sourced; "exit" with no argument propagates the status of the preceding
# "false", i.e. 1.)
if [ "${BASH_SOURCE[0]}" -ef "$0" ]; then
    echo "This script is intended to be sourced, not run. Aborting."
    false
    exit
fi
#------------------------------------------------------------------------------#
# For tests using the Linux kernel network stack.
#------------------------------------------------------------------------------#
# Health check probe port for all containers (single source of truth; the
# Makefile reads it via host_only_get_docker_health_probe_port below).
export DOCKER_HEALTH_PROBE_PORT="8123"
# Docker bridge network settings.
# NOTE(review): all addresses below sit in the 169.254.0.0/16 IPv4 link-local
# range, presumably to avoid colliding with routable subnets — confirm.
export CLIENT_BRIDGE_IP_DOCKER="169.254.0.1"
export SERVER_BRIDGE_IP_DOCKER="169.254.0.2"
export BRIDGE_NET_DOCKER="169.254.0.0/24"
export BRIDGE_GW_DOCKER="169.254.0.254"
# Overlay IP addresses (Linux-stack VXLAN endpoints inside the containers).
export CLIENT_VXLAN_IP_LINUX="169.254.10.1"
export SERVER_VXLAN_IP_LINUX="169.254.10.2"
export MASK_VXLAN_LINUX="24"
export VXLAN_ID_LINUX="42"
# IANA (rather than Linux legacy port value).
export VXLAN_PORT="4789"
# Docker network we use to bridge containers.
export DOCKER_NET="vpp-testbench-net"
# Docker container names for client and server (runtime aliases).
export DOCKER_CLIENT_HOST="vpp-testbench-client"
export DOCKER_SERVER_HOST="vpp-testbench-server"
# Some related variables have to be computed at the last second, so they
# are not all defined up-front. Paths where the containers' network
# namespaces get exposed on the host for use with "ip netns".
export CLIENT_VPP_NETNS_DST="/var/run/netns/${DOCKER_CLIENT_HOST}"
export SERVER_VPP_NETNS_DST="/var/run/netns/${DOCKER_SERVER_HOST}"
# VPP options.
# These can be arbitrarily named.
export CLIENT_VPP_HOST_IF="vpp1"
export SERVER_VPP_HOST_IF="vpp2"
# Putting VPP interfaces on separate subnet from Linux-stack i/f.
export CLIENT_VPP_MEMIF_IP="169.254.11.1"
export SERVER_VPP_MEMIF_IP="169.254.11.2"
export VPP_MEMIF_NM="24"
# Tap interfaces bridged to the memifs (a third, distinct subnet).
export CLIENT_VPP_TAP_IP_MEMIF="169.254.12.1"
export SERVER_VPP_TAP_IP_MEMIF="169.254.12.2"
export VPP_TAP_NM="24"
# Bridge domain ID (for VPP tap + VXLAN interfaces). Arbitrary.
export VPP_BRIDGE_DOMAIN_TAP="1000"
# VPP socket path. Make it one level "deeper" than the "/run/vpp" that is used
# by default, so our containers don't accidentally connect to an instance of
# VPP running on the host OS (i.e. "/run/vpp/vpp.sock"), and hang the system.
export VPP_SOCK_PATH="/run/vpp/containers"
#------------------------------------------------------------------------------#
# @brief: Converts an integer value representation of a VXLAN ID to a
#         VXLAN IPv4 multicast address (string representation): the first
#         octet is fixed at "239" and the remaining three octets carry the
#         24-bit value. Assumes it is never supplied an input wider than an
#         unsigned 24-bit integer.
function vxlan_id_to_mc_ip()
{
    if [ $# -ne 1 ]; then
        echo "Sanity failure."
        false
        exit
    fi
    local vni="${1}"
    # Slice the 24-bit ID into its three bytes, high to low.
    local oct2=$(( (vni >> 16) & 0xff ))
    local oct3=$(( (vni >> 8) & 0xff ))
    local oct4=$(( vni & 0xff ))
    printf '239.%d.%d.%d\n' "${oct2}" "${oct3}" "${oct4}"
    true
}
# Multicast address for VXLAN. Treat the lower three octets as the 24-bit
# representation of the VXLAN ID for ease-of-use (use-case specific, not
# necessarily an established rule/protocol).
# Assigned and exported on separate lines (SC2155): combining them would
# mask the command substitution's exit status.
MC_VXLAN_ADDR_LINUX="$(vxlan_id_to_mc_ip ${VXLAN_ID_LINUX})"
export MC_VXLAN_ADDR_LINUX
#------------------------------------------------------------------------------#
# @brief: Accessor so the Makefile can source this file and read the
#         health-probe port, which then propagates into the Docker build
#         and the Dockerfile logic ("DRY" — single source of truth).
function host_only_get_docker_health_probe_port()
{
    printf '%s\n' "${DOCKER_HEALTH_PROBE_PORT}"
}
#------------------------------------------------------------------------------#
# @brief: Creates the Docker bridge network used to connect the
# client and server testbench containers. Subnet/gateway come
# from the exported BRIDGE_* settings above.
function host_only_create_docker_networks()
{
    # Create network (bridge for VXLAN). Don't touch 172.16/12 subnet, as
    # Docker uses it by default for its own overlay functionality.
    docker network create \
        --driver bridge \
        --subnet=${BRIDGE_NET_DOCKER} \
        --gateway=${BRIDGE_GW_DOCKER} \
        "${DOCKER_NET}"
}
#------------------------------------------------------------------------------#
# @brief: Destroys the Docker bridge network for connecting the
# containers. "|| true" keeps cleanup idempotent (the network
# may never have been created, or was already removed).
function host_only_destroy_docker_networks()
{
    docker network rm "${DOCKER_NET}" || true
}
#------------------------------------------------------------------------------#
# @brief: Bringup/dependency helper for VPP. Runs on the host OS
# (uses sudo).
function host_only_create_vpp_deps()
{
    # Create area for VPP sockets and mount points, if it doesn't already
    # exist. Our containers need access to this path so they can see each
    # others' respective sockets so we can bind them together via memif.
    sudo mkdir -p "${VPP_SOCK_PATH}"
}
#------------------------------------------------------------------------------#
# @brief: Launches the testbench client container.
# @param: $1 - fully qualified image name (repo:tag) to run.
function host_only_run_testbench_client_container()
{
    # Sanity check.
    if [ $# -ne 1 ]; then
        echo "Sanity failure."
        false
        exit
    fi
    # Launch container. Mount the local PWD into the container too (so we can
    # backup results).
    # NET_ADMIN: create/configure links in-container. SYS_NICE/SYS_PTRACE and
    # the /dev/* device mappings are presumably for VPP/DPDK and tap support
    # — NOTE(review): confirm the minimal required set. The VPP_SOCK_PATH
    # mount is shared with the server container so the two VPP instances can
    # reach each other's memif sockets.
    # NOTE(review): the combined "--network name=<net>,ip=<ip>" syntax
    # requires a recent Docker CLI (20.10+) — confirm target environments.
    local image_name="${1}"
    docker run -d --rm \
        --cap-add=NET_ADMIN \
        --cap-add=SYS_NICE \
        --cap-add=SYS_PTRACE \
        --device=/dev/net/tun:/dev/net/tun \
        --device=/dev/vfio/vfio:/dev/vfio/vfio \
        --device=/dev/vhost-net:/dev/vhost-net \
        --name "${DOCKER_CLIENT_HOST}" \
        --volume="$(pwd):/work:rw" \
        --volume="${VPP_SOCK_PATH}:/run/vpp:rw" \
        --network name="${DOCKER_NET},ip=${CLIENT_BRIDGE_IP_DOCKER}" \
        --workdir=/work \
        "${image_name}"
}
#------------------------------------------------------------------------------#
# @brief: Launches the testbench server container.
# @param: $1 - fully qualified image name (repo:tag) to run.
function host_only_run_testbench_server_container()
{
    # Sanity check.
    if [ $# -ne 1 ]; then
        echo "Sanity failure."
        false
        exit
    fi
    # Launch container. Mount the local PWD into the container too (so we can
    # backup results).
    # Same capability/device/volume rationale as the client launcher; note
    # this one does not mount the PWD or set a workdir (server produces no
    # artifacts to back up).
    local image_name="${1}"
    docker run -d --rm \
        --cap-add=NET_ADMIN \
        --cap-add=SYS_NICE \
        --cap-add=SYS_PTRACE \
        --device=/dev/net/tun:/dev/net/tun \
        --device=/dev/vfio/vfio:/dev/vfio/vfio \
        --device=/dev/vhost-net:/dev/vhost-net \
        --name "${DOCKER_SERVER_HOST}" \
        --volume="${VPP_SOCK_PATH}:/run/vpp:rw" \
        --network name="${DOCKER_NET},ip=${SERVER_BRIDGE_IP_DOCKER}" \
        "${image_name}"
}
#------------------------------------------------------------------------------#
# @brief: Terminates the testbench client container.
# "|| true" keeps this idempotent (container may not be running);
# "docker rm" should normally be a no-op since the container runs
# with "--rm", but is kept as a defensive cleanup.
function host_only_kill_testbench_client_container()
{
    docker kill "${DOCKER_CLIENT_HOST}" || true
    docker rm "${DOCKER_CLIENT_HOST}" || true
}
#------------------------------------------------------------------------------#
# @brief: Terminates the testbench server container.
# "|| true" keeps this idempotent (container may not be running);
# "docker rm" should normally be a no-op since the container runs
# with "--rm", but is kept as a defensive cleanup.
function host_only_kill_testbench_server_container()
{
    docker kill "${DOCKER_SERVER_HOST}" || true
    docker rm "${DOCKER_SERVER_HOST}" || true
}
#------------------------------------------------------------------------------#
# @brief: Launches an interactive shell in the client container.
# "--init-file /entrypoint.sh" sources the entrypoint; its
# source-guard (BASH_SOURCE -ef $0) then only defines the helper
# functions/variables without re-running main, so they are
# available in the interactive session.
function host_only_shell_client_container()
{
    docker exec -it "${DOCKER_CLIENT_HOST}" bash --init-file /entrypoint.sh
}
#------------------------------------------------------------------------------#
# @brief: Launches an interactive shell in the server container.
# "--init-file /entrypoint.sh" sources the entrypoint; its
# source-guard (BASH_SOURCE -ef $0) then only defines the helper
# functions/variables without re-running main, so they are
# available in the interactive session.
function host_only_shell_server_container()
{
    docker exec -it "${DOCKER_SERVER_HOST}" bash --init-file /entrypoint.sh
}
#------------------------------------------------------------------------------#
# @brief: Determines the network namespace or "netns" associated with a
# running Docker container, and then creates a network interface
# in the default/host netns, and moves it into the netns
# associated with the container. Debug/teaching aid only; the
# normal bring-up path creates interfaces inside the containers.
function host_only_move_host_interfaces_into_container()
{
    # NOTE: this is only necessary if we want to create Linux network
    # interfaces while working in the default namespace, and then move them
    # into container network namespaces.
    # In earlier versions of this code, we did such an operation, but now we
    # just create the interfaces inside the containers themselves (requires
    # CAP_NET_ADMIN, or privileged containers, which we avoid). This is left
    # here as it's occasionally useful for debug purposes (or might become a
    # mini-lab itself).
    # Make sure netns path exists.
    sudo mkdir -p /var/run/netns
    # Mount container network namespaces so that they are accessible via "ip
    # netns": symlink each container's /proc/<pid>/ns/net into
    # /var/run/netns/<container-name>.
    DOCKER_CLIENT_PID=$(docker inspect -f '{{.State.Pid}}' ${DOCKER_CLIENT_HOST})
    DOCKER_SERVER_PID=$(docker inspect -f '{{.State.Pid}}' ${DOCKER_SERVER_HOST})
    CLIENT_VPP_NETNS_SRC=/proc/${DOCKER_CLIENT_PID}/ns/net
    SERVER_VPP_NETNS_SRC=/proc/${DOCKER_SERVER_PID}/ns/net
    sudo ln -sfT "${CLIENT_VPP_NETNS_SRC}" "${CLIENT_VPP_NETNS_DST}"
    sudo ln -sfT "${SERVER_VPP_NETNS_SRC}" "${SERVER_VPP_NETNS_DST}"
    # Move these interfaces into the namespaces of the containers and assign an
    # IPv4 address to them.
    # BUGFIX: the original referenced ${DOCKER_CLIENT_NETNS} and
    # ${DOCKER_SERVER_NETNS}, which are defined nowhere in this file (they
    # expand empty). The netns handles created above live at
    # /var/run/netns/<container-name>, so the names "ip netns" recognizes are
    # ${DOCKER_CLIENT_HOST} / ${DOCKER_SERVER_HOST}.
    sudo ip link set dev "${CLIENT_VPP_HOST_IF}" netns "${DOCKER_CLIENT_HOST}"
    sudo ip link set dev "${SERVER_VPP_HOST_IF}" netns "${DOCKER_SERVER_HOST}"
    docker exec ${DOCKER_CLIENT_HOST} ip a
    docker exec ${DOCKER_SERVER_HOST} ip a
    # Bring up the links and assign IP addresses. This must be done
    # **after** moving the interfaces to a new netns, as we might have a
    # hypothetical use case where we assign the same IP to multiple
    # interfaces, which would be a problem. This collision issue isn't a
    # problem if the interfaces are in separate network namespaces.
}