Compare commits
26 Commits
v18.07.1
...
v17.10-rc2
Author | SHA1 | Date | |
---|---|---|---|
|
cf6c343710 | ||
|
e0c078aa2a | ||
|
87f1411722 | ||
|
93b7822ddc | ||
|
86c0446e28 | ||
|
4ca58265a0 | ||
|
dfa0d20405 | ||
|
8c5bfbcc09 | ||
|
dea5881815 | ||
|
76c37d2d84 | ||
|
fc7e711003 | ||
|
67700d4116 | ||
|
a5081a7ac3 | ||
|
e691345d7a | ||
|
79bfbae54c | ||
|
f3d627dd3c | ||
|
de6c03c0f1 | ||
|
77d7dcba32 | ||
|
8ec5f4d742 | ||
|
dab7eb87bc | ||
|
780af45905 | ||
|
bd70c2f2e3 | ||
|
1808f3c00a | ||
|
3fd57e6753 | ||
|
d84f2ef54a | ||
|
7ea28045aa |
@ -2,3 +2,4 @@
|
||||
host=gerrit.fd.io
|
||||
port=29418
|
||||
project=vpp
|
||||
defaultbranch=stable/1710
|
||||
|
13
Makefile
13
Makefile
@ -81,13 +81,16 @@ endif
|
||||
RPM_DEPENDS = redhat-lsb glibc-static java-1.8.0-openjdk-devel yum-utils
|
||||
RPM_DEPENDS += apr-devel
|
||||
RPM_DEPENDS += numactl-devel
|
||||
RPM_DEPENDS += check
|
||||
RPM_DEPENDS += check check-devel
|
||||
|
||||
ifeq ($(OS_ID)-$(OS_VERSION_ID),fedora-25)
|
||||
RPM_DEPENDS += subunit subunit-devel
|
||||
RPM_DEPENDS += openssl-devel
|
||||
RPM_DEPENDS += python-devel
|
||||
RPM_DEPENDS += python2-virtualenv
|
||||
RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries'
|
||||
else ifeq ($(shell if [ "$(OS_ID)" = "fedora" ]; then test $(OS_VERSION_ID) -gt 25; echo $$?; fi),0)
|
||||
RPM_DEPENDS += subunit subunit-devel
|
||||
RPM_DEPENDS += compat-openssl10-devel
|
||||
RPM_DEPENDS += python2-devel
|
||||
RPM_DEPENDS += python2-virtualenv
|
||||
@ -110,13 +113,7 @@ endif
|
||||
|
||||
RPM_SUSE_DEPENDS = autoconf automake bison ccache chrpath distribution-release gcc6 glibc-devel-static
|
||||
RPM_SUSE_DEPENDS += java-1_8_0-openjdk-devel libopenssl-devel libtool make openssl-devel
|
||||
RPM_SUSE_DEPENDS += python-devel python3-devel python-pip python3-pip python-rpm-macros shadow nasm libnuma-devel python3
|
||||
|
||||
ifeq ($(filter rhel centos,$(OS_ID)),$(OS_ID))
|
||||
RPM_DEPENDS += python34
|
||||
else
|
||||
RPM_DEPENDS += python3
|
||||
endif
|
||||
RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow nasm libnuma-devel
|
||||
|
||||
ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),)
|
||||
STARTUP_CONF ?= $(STARTUP_DIR)/startup.conf
|
||||
|
15
README.md
15
README.md
@ -30,10 +30,11 @@ Directory name | Description
|
||||
build-root | Build output directory
|
||||
doxygen | Documentation generator configuration
|
||||
dpdk | DPDK patches and build infrastructure
|
||||
@ref src | VPP source code
|
||||
@ref extras/libmemif | Client library for memif
|
||||
@ref src/examples | VPP example code
|
||||
@ref src/plugins | VPP bundled plugins directory
|
||||
@ref src/svm | Shared virtual memory allocation library
|
||||
src/tests | Unit tests
|
||||
src/tests | Standalone tests (not part of test harness)
|
||||
src/vat | VPP API test program
|
||||
@ref src/vlib | VPP application library
|
||||
@ref src/vlibapi | VPP API library
|
||||
@ -43,9 +44,8 @@ Directory name | Description
|
||||
@ref src/vpp | VPP application
|
||||
@ref src/vpp-api | VPP application API bindings
|
||||
@ref src/vppinfra | VPP core library
|
||||
test | Unit tests
|
||||
@ref src/vpp/api | Not-yet-relocated API bindings
|
||||
@ref src/examples | VPP example code
|
||||
test | Unit tests and Python test harness
|
||||
|
||||
## Getting started
|
||||
|
||||
@ -95,12 +95,13 @@ for this can be found [on the Setting up Vagrant wiki page]
|
||||
## More information
|
||||
|
||||
Several modules provide documentation, see @subpage user_doc for more
|
||||
information.
|
||||
end-user-oriented information. Also see @subpage dev_doc for developer notes.
|
||||
|
||||
Visit the [VPP wiki](https://wiki.fd.io/view/VPP) for details on more
|
||||
advanced building strategies and development notes.
|
||||
advanced building strategies and other development notes.
|
||||
|
||||
|
||||
## Test Framework
|
||||
|
||||
There is PyDoc generated documentation available for the VPP test framework. See @subpage test_framework_doc for details.
|
||||
There is PyDoc generated documentation available for the VPP test framework.
|
||||
See @ref test_framework_doc for details.
|
||||
|
@ -62,7 +62,8 @@ DOXY_INPUT ?= \
|
||||
$(wildcard $(WS_ROOT)/*.md) \
|
||||
$(wildcard $(DOXY_DIR)/*.md) \
|
||||
$(DOXY_SRC_DIRECTORIES) \
|
||||
$(DOXY_SRC)/plugins
|
||||
$(DOXY_SRC)/plugins \
|
||||
extras
|
||||
|
||||
# Strip leading workspace path from input names
|
||||
DOXY_INPUT := $(subst $(WS_ROOT)/,,$(DOXY_INPUT))
|
||||
@ -73,7 +74,6 @@ DOXY_INPUT := $(subst $(WS_ROOT)/,,$(DOXY_INPUT))
|
||||
# there's a DPDK equivalent that conflicts.
|
||||
# These must be left-anchored paths for the regexp below to work.
|
||||
DOXY_EXCLUDE ?= \
|
||||
$(DOXY_SRC)/vlib/buffer.c \
|
||||
$(DOXY_SRC)/vpp-api/lua
|
||||
|
||||
# Generate a regexp for filenames to exclude
|
||||
|
@ -45,3 +45,9 @@ pre {
|
||||
a.el {
|
||||
font-family: Consolas, Courier, monospace;
|
||||
}
|
||||
|
||||
div.fragment {
|
||||
padding: 2px;
|
||||
margin-left: 8px;
|
||||
}
|
||||
|
||||
|
12
doxygen/dev_doc.md
Normal file
12
doxygen/dev_doc.md
Normal file
@ -0,0 +1,12 @@
|
||||
Developer Documentation {#dev_doc}
|
||||
=======================
|
||||
|
||||
Programming notes for developers.
|
||||
|
||||
- @subpage test_framework_doc
|
||||
- @subpage sample_plugin_doc
|
||||
- @subpage api_doc
|
||||
- @subpage vapi_doc
|
||||
- @subpage acl_hash_lookup
|
||||
- @subpage acl_multicore
|
||||
- @subpage libmemif_doc
|
@ -882,7 +882,8 @@ EXAMPLE_RECURSIVE = NO
|
||||
# that contain images that are to be included in the documentation (see the
|
||||
# \image command).
|
||||
|
||||
IMAGE_PATH = $(ROOT)/doxygen/assets
|
||||
IMAGE_PATH = $(ROOT)/doxygen/assets \
|
||||
$(ROOT)/extras/libmemif/docs
|
||||
|
||||
# The INPUT_FILTER tag can be used to specify a program that doxygen should
|
||||
# invoke to filter for each input file. Doxygen will invoke the filter program
|
||||
|
@ -3,5 +3,6 @@ Test Framework Documentation {#test_framework_doc}
|
||||
|
||||
PyDoc generated documentation for the "make test" framework is available for the following releases
|
||||
|
||||
- [Test framework documentation for VPP 17.10](https://docs.fd.io/vpp/17.10/vpp_make_test/html)
|
||||
- [Test framework documentation for VPP 17.04](https://docs.fd.io/vpp/17.04/vpp_make_test/html)
|
||||
- [Test framework documentation for VPP 17.01](https://docs.fd.io/vpp/17.01/vpp_make_test/html)
|
||||
|
@ -16,6 +16,4 @@ Several modules provide operational, dataplane-user focused documentation.
|
||||
- @subpage span_doc
|
||||
- @subpage srv6_doc
|
||||
- @subpage srmpls_doc
|
||||
- @subpage sample_plugin_doc
|
||||
- @subpage nat64_doc
|
||||
- @subpage api_doc
|
||||
|
@ -1,4 +1,4 @@
|
||||
## Build Instructions
|
||||
## Build Instructions {#libmemif_build_doc}
|
||||
|
||||
Install dependencies
|
||||
```
|
||||
@ -49,6 +49,7 @@ commands:
|
||||
ip-set <index> <ip-addr> - set interface ip address
|
||||
rx-mode <index> <qid> <polling|interrupt> - set queue rx mode
|
||||
```
|
||||
|
||||
#### Examples
|
||||
|
||||
Once the library is build/installed, refer to [Examples](../examples/README.md) and [Getting started](GettingStarted.md) for additional information on basic use cases and API usage.
|
||||
Once the library is built/installed, refer to @ref libmemif_examples_doc and @ref libmemif_gettingstarted_doc for additional information on basic use cases and API usage.
|
@ -1,8 +1,8 @@
|
||||
## Getting started
|
||||
## Getting started {#libmemif_gettingstarted_doc}
|
||||
|
||||
#### Concept (Connecting to VPP)
|
||||
|
||||
For detailed information on api calls and structures please refer to [libmemif.h](../src/libmemif.h)
|
||||
For detailed information on api calls and structures please refer to @ref libmemif.h.
|
||||
|
||||
1. Initialize memif
|
||||
- Declare callback function handling file descriptor event polling.
|
||||
@ -152,7 +152,8 @@ if (err != MEMIF_ERR_SUCCESS)
|
||||
|
||||
#### Example app (libmemif fd event polling):
|
||||
|
||||
- [ICMP Responder](../examples/icmp_responder/main.c)
|
||||
- @ref extras/libmemif/examples/icmp_responder
|
||||
|
||||
> Optional argument: transmit queue id.
|
||||
```
|
||||
icmpr 1
|
||||
@ -162,11 +163,14 @@ icmpr 1
|
||||
|
||||
#### Example app:
|
||||
|
||||
- [ICMP Responder custom fd event polling](../examples/icmp_responder-epoll/main.c)
|
||||
ICMP Responder custom fd event polling.
|
||||
|
||||
- @ref extras/libmemif/examples/icmp_responder-epoll
|
||||
|
||||
#### Example app (multi-thread queue polling)
|
||||
|
||||
- [ICMP Responder multi-thread](../examples/icmp_responder-mt/main.c)
|
||||
ICMP Responder multi-thread.
|
||||
- @ref extras/libmemif/examples/icmp_responder-mt
|
||||
|
||||
> Simple example of libmemif multi-thread usage. Connection establishment is handled by main thread. There are two rx queues in this example. One in polling mode and second in interrupt mode.
|
||||
|
@ -1,16 +0,0 @@
|
||||
## Examples
|
||||
|
||||
After build, the root folder will contain scripts linking binary examples with the library (same name as the example apps). These scripts can be executed to run example apps without installing the library. Example app binaries can be found in the _libs_ folder. To run binaries directly, make sure that the libmemif library is installed.
|
||||
|
||||
#### Run in container
|
||||
ligato/libmemif-sample-service image contains built and installed libmemif. To run different examples, override the docker CMD to start the container in bash:
|
||||
```
|
||||
# docker run -it --entrypoint=/bin/bash -i --rm --name icmp-responder --hostname icmp-responder --privileged -v "/run/vpp/:/run/vpp/" ligato/libmemif-sample-service
|
||||
```
|
||||
Current WORKDIR is set to root repository directory. Example apps can be run from this directory (a script linking binary with library), or browse to ./.libs folder and execute binary directly.
|
||||
|
||||
Example app | Description
|
||||
------------|------------
|
||||
[icmpr](../examples/icmp_responder/main.c) | Simplest implementation. Event polling is handled by libmemif. A single memif connection in slave mode is created (id 0). Use Ctrl + C to exit the app. Memif receive mode: interrupt.
|
||||
[icmpr-epoll](../examples/icmp_responder-epoll/main.c) (run in container by default) | Supports multiple connections and master mode. User can create/delete connections, set ip addresses, print connection information. [Example setup](ExampleSetup.md) contains instructions on basic connection use cases setups. Memif receive mode: interrupt. App provides functionality to disable interrupts for specified queue/s for testing purposes. Polling mode is not implemented in this example.
|
||||
[icmpr-mt](../examples/icmp_responder-mt/main.c) | Multi-thread example, very similar to icmpr-epoll. Packets are handled in threads assigned to specific queues. Slave mode only. Memif receive mode: polling (memif_rx_poll function), interrupt (memif_rx_interrupt function). Receive modes differ per queue.
|
@ -1,11 +1,12 @@
|
||||
## Example setup
|
||||
## Example setup {#libmemif_example_setup_doc}
|
||||
|
||||
#### VPP-memif master icmp_responder slave
|
||||
|
||||
> Libmemif example app(s) use memif default socket file: /run/vpp/memif.sock.
|
||||
> Libmemif example app(s) use memif default socket file: `/run/vpp/memif.sock`.
|
||||
|
||||
Run VPP and icmpr-epoll example (default example when running in container).
|
||||
> Other examples work similarly to icmpr-epoll. A brief explanation can be found in the [Examples readme](README.md) file.
|
||||
|
||||
> Other examples work similarly to icmpr-epoll. A brief explanation can be found in @ref libmemif_examples_doc .
|
||||
|
||||
VPP-side config:
|
||||
```
|
18
extras/libmemif/examples/examples_doc.md
Normal file
18
extras/libmemif/examples/examples_doc.md
Normal file
@ -0,0 +1,18 @@
|
||||
## Examples {#libmemif_examples_doc}
|
||||
|
||||
After build, the root folder will contain scripts linking binary examples with the library (same name as the example apps). These scripts can be executed to run example apps without installing the library. Example app binaries can be found in the _libs_ folder. To run binaries directly, make sure that the libmemif library is installed.
|
||||
|
||||
#### Run in container
|
||||
|
||||
`ligato/libmemif-sample-service` image contains built and installed libmemif. To run different examples, override the docker CMD to start the container in bash:
|
||||
|
||||
```
|
||||
# docker run -it --entrypoint=/bin/bash -i --rm --name icmp-responder --hostname icmp-responder --privileged -v "/run/vpp/:/run/vpp/" ligato/libmemif-sample-service
|
||||
```
|
||||
Current WORKDIR is set to root repository directory. Example apps can be run from this directory (a script linking binary with library), or browse to `./.libs` folder and execute binary directly.
|
||||
|
||||
Example app | Description
|
||||
------------|------------
|
||||
@ref extras/libmemif/examples/icmp_responder | Simplest implementation. Event polling is handled by libmemif. A single memif connection in slave mode is created (id 0). Use Ctrl + C to exit the app. Memif receive mode: interrupt.
|
||||
@ref extras/libmemif/examples/icmp_responder-epoll (run in container by default) | Supports multiple connections and master mode. User can create/delete connections, set ip addresses, print connection information. @ref libmemif_example_setup_doc contains instructions on basic connection use cases setups. Memif receive mode: interrupt. App provides functionality to disable interrupts for specified queue/s for testing purposes. Polling mode is not implemented in this example.
|
||||
@ref extras/libmemif/examples/icmp_responder-mt | Multi-thread example, very similar to icmpr-epoll. Packets are handled in threads assigned to specific queues. Slave mode only. Memif receive mode: polling (memif_rx_poll function), interrupt (memif_rx_interrupt function). Receive modes differ per queue.
|
@ -1,5 +1,6 @@
|
||||
Shared Memory Packet Interface (memif) Library
|
||||
Shared Memory Packet Interface (memif) Library {#libmemif_doc}
|
||||
==============================================
|
||||
|
||||
## Introduction
|
||||
|
||||
Shared memory packet interface (memif) provides high performance packet transmit and receive between user application and Vector Packet Processing (VPP) or multiple user applications. Using libmemif, user application can create shared memory interface in master or slave mode and connect to VPP or another application using libmemif. Once the connection is established, user application can receive or transmit packets using libmemif API.
|
||||
@ -63,10 +64,11 @@ commands:
|
||||
rx-mode <index> <qid> <polling|interrupt> - set queue rx mode
|
||||
```
|
||||
|
||||
Continue with [Example setup](examples/ExampleSetup.md) which contains instructions on how to set up conenction between icmpr-epoll example app and VPP-memif.
|
||||
Continue with @ref libmemif_example_setup_doc which contains instructions on how to set up a connection between the icmpr-epoll example app and VPP-memif.
|
||||
|
||||
#### Next steps
|
||||
|
||||
- [Build instructions](docs/BuildInstructions.md) Instructions on how to build/install libmemif.
|
||||
- [Examples](examples/README.md) More example apps presenting different features.
|
||||
- [Getting started](docs/GettingStarted.md) Introduction to libmemif API. Explaining library usage in custom app.
|
||||
- @subpage libmemif_build_doc
|
||||
- @subpage libmemif_examples_doc
|
||||
- @subpage libmemif_example_setup_doc
|
||||
- @subpage libmemif_gettingstarted_doc
|
@ -31,6 +31,10 @@ Version: %{_version}
|
||||
Release: %{_release}
|
||||
Requires: vpp-lib = %{_version}-%{_release}, net-tools, pciutils, python
|
||||
BuildRequires: systemd, chrpath
|
||||
BuildRequires: check, check-devel
|
||||
%if 0%{?fedora} >= 25
|
||||
BuildRequires: subunit, subunit-devel
|
||||
%endif
|
||||
%if 0%{?fedora} >= 26
|
||||
BuildRequires: compat-openssl10-devel
|
||||
BuildRequires: python2-devel, python2-virtualenv
|
||||
@ -122,7 +126,7 @@ This package contains the python bindings for the vpp api
|
||||
%prep
|
||||
# Unpack into dir with longer name as workaround of debugedit bug in rpm-build 4.13
|
||||
rm -rf %{name}-%{_version}
|
||||
rm -rf %{_tmpbuild_dir}
|
||||
rm -rf %{_tmp_build_dir}
|
||||
/usr/bin/xz -dc '%{_sourcedir}/%{name}-%{_version}-%{_release}.tar.xz' | /usr/bin/tar -xf -
|
||||
mv %{name}-%{_version} %{_tmp_build_dir}
|
||||
cd '%{_tmp_build_dir}'
|
||||
|
@ -1,4 +1,4 @@
|
||||
ACL plugin constant-time lookup design
|
||||
ACL plugin constant-time lookup design {#acl_hash_lookup}
|
||||
======================================
|
||||
|
||||
The initial implementation of ACL plugin performs a trivial for() cycle,
|
@ -1,4 +1,4 @@
|
||||
Multicore support for ACL plugin
|
||||
Multicore support for ACL plugin {#acl_multicore}
|
||||
================================
|
||||
|
||||
This captures some considerations and design decisions that I have made,
|
||||
@ -20,16 +20,18 @@ at the time of replacing the old ACL being checked, with
|
||||
the new ACL.
|
||||
|
||||
In case an acl_add_replace is being used to replace the rules
|
||||
within the existing entry, a reallocation of am->acls[X].rules
|
||||
within the existing entry, a reallocation of `am->acls[X].rules`
|
||||
vector will happen and potentially a change in count.
|
||||
|
||||
acl_match_5tuple() has the following code:
|
||||
|
||||
```{.c}
|
||||
a = am->acls + acl_index;
|
||||
for (i = 0; i < a->count; i++)
|
||||
{
|
||||
r = a->rules + i;
|
||||
. . .
|
||||
```
|
||||
|
||||
Ideally we should be immune from a->rules changing,
|
||||
but the problem arises if the count changes in flight,
|
@ -290,6 +290,7 @@ memif_init_regions_and_queues (memif_if_t * mif)
|
||||
return err;
|
||||
|
||||
r->fd = alloc.fd;
|
||||
r->shm = alloc.addr;
|
||||
|
||||
for (i = 0; i < mif->run.num_s2m_rings; i++)
|
||||
{
|
||||
|
@ -523,11 +523,11 @@ snat_in2out_error_t icmp_get_key(ip4_header_t *ip0,
|
||||
* @param e optional parameter
|
||||
*/
|
||||
u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e)
|
||||
{
|
||||
ip4_header_t *ip0;
|
||||
icmp46_header_t *icmp0;
|
||||
u32 sw_if_index0;
|
||||
u32 rx_fib_index0;
|
||||
@ -537,13 +537,7 @@ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
clib_bihash_kv_8_8_t kv0, value0;
|
||||
u32 next0 = ~0;
|
||||
int err;
|
||||
u32 iph_offset0 = 0;
|
||||
|
||||
if (PREDICT_FALSE(vnet_buffer(b0)->sw_if_index[VLIB_TX] != ~0))
|
||||
{
|
||||
iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length;
|
||||
}
|
||||
ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + iph_offset0);
|
||||
icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
|
||||
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
|
||||
rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
|
||||
@ -622,11 +616,11 @@ out:
|
||||
* @param e optional parameter
|
||||
*/
|
||||
u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e)
|
||||
{
|
||||
ip4_header_t *ip0;
|
||||
icmp46_header_t *icmp0;
|
||||
u32 sw_if_index0;
|
||||
u32 rx_fib_index0;
|
||||
@ -637,7 +631,6 @@ u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 next0 = ~0;
|
||||
int err;
|
||||
|
||||
ip0 = vlib_buffer_get_current (b0);
|
||||
icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
|
||||
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
|
||||
rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
|
||||
@ -715,7 +708,7 @@ static inline u32 icmp_in2out (snat_main_t *sm,
|
||||
|
||||
echo0 = (icmp_echo_header_t *)(icmp0+1);
|
||||
|
||||
next0_tmp = sm->icmp_match_in2out_cb(sm, node, thread_index, b0,
|
||||
next0_tmp = sm->icmp_match_in2out_cb(sm, node, thread_index, b0, ip0,
|
||||
&protocol, &sm0, &dont_translate, d, e);
|
||||
if (next0_tmp != ~0)
|
||||
next0 = next0_tmp;
|
||||
@ -2919,11 +2912,11 @@ VLIB_NODE_FUNCTION_MULTIARCH (snat_det_in2out_node, snat_det_in2out_node_fn);
|
||||
* @param e optional parameter
|
||||
*/
|
||||
u32 icmp_match_in2out_det(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e)
|
||||
{
|
||||
ip4_header_t *ip0;
|
||||
icmp46_header_t *icmp0;
|
||||
u32 sw_if_index0;
|
||||
u32 rx_fib_index0;
|
||||
@ -2942,7 +2935,6 @@ u32 icmp_match_in2out_det(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
ip4_address_t in_addr;
|
||||
u16 in_port;
|
||||
|
||||
ip0 = vlib_buffer_get_current (b0);
|
||||
icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
|
||||
echo0 = (icmp_echo_header_t *)(icmp0+1);
|
||||
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
|
||||
|
@ -257,6 +257,7 @@ typedef u32 snat_icmp_match_function_t (struct snat_main_s *sm,
|
||||
vlib_node_runtime_t *node,
|
||||
u32 thread_index,
|
||||
vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0,
|
||||
u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate,
|
||||
@ -454,27 +455,33 @@ typedef struct {
|
||||
} tcp_udp_header_t;
|
||||
|
||||
u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e);
|
||||
u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e);
|
||||
u32 icmp_match_in2out_det(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e);
|
||||
u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e);
|
||||
u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e);
|
||||
u32 icmp_match_out2in_det(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e);
|
||||
void increment_v4_address(ip4_address_t * a);
|
||||
|
@ -297,11 +297,11 @@ snat_out2in_error_t icmp_get_key(ip4_header_t *ip0,
|
||||
* @param e optional parameter
|
||||
*/
|
||||
u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e)
|
||||
{
|
||||
ip4_header_t *ip0;
|
||||
icmp46_header_t *icmp0;
|
||||
u32 sw_if_index0;
|
||||
u32 rx_fib_index0;
|
||||
@ -314,7 +314,6 @@ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 next0 = ~0;
|
||||
int err;
|
||||
|
||||
ip0 = vlib_buffer_get_current (b0);
|
||||
icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
|
||||
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
|
||||
rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
|
||||
@ -408,11 +407,11 @@ out:
|
||||
* @param e optional parameter
|
||||
*/
|
||||
u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e)
|
||||
{
|
||||
ip4_header_t *ip0;
|
||||
icmp46_header_t *icmp0;
|
||||
u32 sw_if_index0;
|
||||
u32 rx_fib_index0;
|
||||
@ -423,7 +422,6 @@ u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 next0 = ~0;
|
||||
int err;
|
||||
|
||||
ip0 = vlib_buffer_get_current (b0);
|
||||
icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
|
||||
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
|
||||
rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
|
||||
@ -494,7 +492,7 @@ static inline u32 icmp_out2in (snat_main_t *sm,
|
||||
|
||||
echo0 = (icmp_echo_header_t *)(icmp0+1);
|
||||
|
||||
next0_tmp = sm->icmp_match_out2in_cb(sm, node, thread_index, b0,
|
||||
next0_tmp = sm->icmp_match_out2in_cb(sm, node, thread_index, b0, ip0,
|
||||
&protocol, &sm0, &dont_translate, d, e);
|
||||
if (next0_tmp != ~0)
|
||||
next0 = next0_tmp;
|
||||
@ -2038,11 +2036,11 @@ VLIB_NODE_FUNCTION_MULTIARCH (snat_det_out2in_node, snat_det_out2in_node_fn);
|
||||
* @param e optional parameter
|
||||
*/
|
||||
u32 icmp_match_out2in_det(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
|
||||
u32 thread_index, vlib_buffer_t *b0,
|
||||
ip4_header_t *ip0, u8 *p_proto,
|
||||
snat_session_key_t *p_value,
|
||||
u8 *p_dont_translate, void *d, void *e)
|
||||
{
|
||||
ip4_header_t *ip0;
|
||||
icmp46_header_t *icmp0;
|
||||
u32 sw_if_index0;
|
||||
u8 protocol;
|
||||
@ -2058,7 +2056,6 @@ u32 icmp_match_out2in_det(snat_main_t *sm, vlib_node_runtime_t *node,
|
||||
snat_det_session_t * ses0 = 0;
|
||||
ip4_address_t out_addr;
|
||||
|
||||
ip0 = vlib_buffer_get_current (b0);
|
||||
icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
|
||||
echo0 = (icmp_echo_header_t *)(icmp0+1);
|
||||
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
|
||||
|
@ -6,12 +6,12 @@ APIs.
|
||||
|
||||
Messages are defined in `*.api` files. Today, there are about 50 api files,
|
||||
with more arriving as folks add programmable features. The API file compiler
|
||||
sources reside in @ref src/tools/vppapigen .
|
||||
sources reside in @ref src/tools/vppapigen.
|
||||
|
||||
Here's a typical request/response message definition, from
|
||||
@ref src/vnet/interface.api :
|
||||
From @ref src/vnet/interface.api, here's a typical request/response message
|
||||
definition:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
autoreply define sw_interface_set_flags
|
||||
{
|
||||
u32 client_index;
|
||||
@ -22,10 +22,10 @@ Here's a typical request/response message definition, from
|
||||
};
|
||||
```
|
||||
|
||||
To a first approximation, the API compiler renders this definition as
|
||||
follows:
|
||||
To a first approximation, the API compiler renders this definition into
|
||||
`build-root/.../vpp/include/vnet/interface.api.h` as follows:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
/****** Message ID / handler enum ******/
|
||||
#ifdef vl_msg_id
|
||||
vl_msg_id(VL_API_SW_INTERFACE_SET_FLAGS, vl_api_sw_interface_set_flags_t_handler)
|
||||
@ -60,10 +60,13 @@ follows:
|
||||
u32 context;
|
||||
i32 retval;
|
||||
}) vl_api_sw_interface_set_flags_reply_t;
|
||||
|
||||
...
|
||||
#endif /* vl_typedefs */
|
||||
```
|
||||
|
||||
To change the admin state of an interface, a binary api client sends a
|
||||
@ref vl_api_sw_interface_set_flags_t to vpp, which will respond with a
|
||||
@ref vl_api_sw_interface_set_flags_t to VPP, which will respond with a
|
||||
@ref vl_api_sw_interface_set_flags_reply_t message.
|
||||
|
||||
Multiple layers of software, transport types, and shared libraries
|
||||
@ -76,7 +79,7 @@ implement a variety of features:
|
||||
message handlers.
|
||||
|
||||
Correctly-coded message handlers know nothing about the transport used to
|
||||
deliver messages to/from vpp. It's reasonably straighforward to use multiple
|
||||
deliver messages to/from VPP. It's reasonably straightforward to use multiple
|
||||
API message transport types simultaneously.
|
||||
|
||||
For historical reasons, binary api messages are (putatively) sent in network
|
||||
@ -90,12 +93,12 @@ Since binary API messages are always processed in order, we allocate messages
|
||||
using a ring allocator whenever possible. This scheme is extremely fast when
|
||||
compared with a traditional memory allocator, and doesn't cause heap
|
||||
fragmentation. See
|
||||
@ref src/vlibmemory/memory_shared.c @ref vl_msg_api_alloc_internal() .
|
||||
@ref src/vlibmemory/memory_shared.c @ref vl_msg_api_alloc_internal().
|
||||
|
||||
Regardless of transport, binary api messages always follow a @ref msgbuf_t
|
||||
header:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
typedef struct msgbuf_
|
||||
{
|
||||
unix_shared_memory_queue_t *q;
|
||||
@ -109,7 +112,7 @@ This structure makes it easy to trace messages without having to
|
||||
decode them - simply save data_len bytes - and allows
|
||||
@ref vl_msg_api_free() to rapidly dispose of message buffers:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
void
|
||||
vl_msg_api_free (void *a)
|
||||
{
|
||||
@ -130,34 +133,34 @@ decode them - simply save data_len bytes - and allows
|
||||
return;
|
||||
}
|
||||
<snip>
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Message Tracing and Replay
|
||||
|
||||
It's extremely important that vpp can capture and replay sizeable binary API
|
||||
It's extremely important that VPP can capture and replay sizeable binary API
|
||||
traces. System-level issues involving hundreds of thousands of API
|
||||
transactions can be re-run in a second or less. Partial replay allows one to
|
||||
binary-search for the point where the wheels fall off. One can add scaffolding
|
||||
to the data plane, to trigger when complex conditions obtain.
|
||||
|
||||
With binary API trace, print, and replay, system-level bug reports of the form
|
||||
"after 300,000 API transactions, the vpp data-plane stopped forwarding
|
||||
"after 300,000 API transactions, the VPP data-plane stopped forwarding
|
||||
traffic, FIX IT!" can be solved offline.
|
||||
|
||||
More often than not, one discovers that a control-plane client
|
||||
misprograms the data plane after a long time or under complex
|
||||
circumstances. Without direct evidence, "it's a data-plane problem!"
|
||||
|
||||
See @ref src/vlibmemory/memory_vlib.c @ref vl_msg_api_process_file() ,
|
||||
and @ref src/vlibapi/api_shared.c . See also the debug CLI command "api trace"
|
||||
See @ref src/vlibmemory/memory_vlib.c @ref vl_msg_api_process_file(),
|
||||
and @ref src/vlibapi/api_shared.c. See also the debug CLI command "api trace"
|
||||
|
||||
## Client connection details
|
||||
|
||||
Establishing a binary API connection to vpp from a C-language client
|
||||
Establishing a binary API connection to VPP from a C-language client
|
||||
is easy:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
int
|
||||
connect_to_vpe (char *client_name, int client_message_queue_length)
|
||||
{
|
||||
@ -176,9 +179,9 @@ is easy:
|
||||
}
|
||||
```
|
||||
|
||||
32 is a typical value for client_message_queue_length. Vpp cannot
|
||||
32 is a typical value for client_message_queue_length. VPP cannot
|
||||
block when it needs to send an API message to a binary API client, and
|
||||
the vpp-side binary API message handlers are very fast. When sending
|
||||
the VPP-side binary API message handlers are very fast. When sending
|
||||
asynchronous messages, make sure to scrape the binary API rx ring with
|
||||
some enthusiasm.
|
||||
|
||||
@ -187,7 +190,7 @@ some enthusiasm.
|
||||
Calling @ref vl_client_connect_to_vlib spins up a binary API message RX
|
||||
pthread:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
static void *
|
||||
rx_thread_fn (void *arg)
|
||||
{
|
||||
@ -214,31 +217,31 @@ To handle the binary API message queue yourself, use
|
||||
@ref vl_client_connect_to_vlib_no_rx_pthread.
|
||||
|
||||
In turn, vl_msg_api_queue_handler(...) uses mutex/condvar signalling
|
||||
to wake up, process vpp -> client traffic, then sleep. Vpp supplies a
|
||||
condvar broadcast when the vpp -> client API message queue transitions
|
||||
to wake up, process VPP -> client traffic, then sleep. VPP supplies a
|
||||
condvar broadcast when the VPP -> client API message queue transitions
|
||||
from empty to nonempty.
|
||||
|
||||
Vpp checks its own binary API input queue at a very high rate. Vpp
|
||||
VPP checks its own binary API input queue at a very high rate. VPP
|
||||
invokes message handlers in "process" context [aka cooperative
|
||||
multitasking thread context] at a variable rate, depending on
|
||||
data-plane packet processing requirements.
|
||||
|
||||
## Client disconnection details
|
||||
|
||||
To disconnect from vpp, call @ref vl_client_disconnect_from_vlib
|
||||
. Please arrange to call this function if the client application
|
||||
terminates abnormally. Vpp makes every effort to hold a decent funeral
|
||||
for dead clients, but vpp can't guarantee to free leaked memory in the
|
||||
To disconnect from VPP, call @ref vl_client_disconnect_from_vlib.
|
||||
Please arrange to call this function if the client application
|
||||
terminates abnormally. VPP makes every effort to hold a decent funeral
|
||||
for dead clients, but VPP can't guarantee to free leaked memory in the
|
||||
shared binary API segment.
|
||||
|
||||
## Sending binary API messages to vpp
|
||||
## Sending binary API messages to VPP
|
||||
|
||||
The point of the exercise is to send binary API messages to vpp, and
|
||||
to receive replies from vpp. Many vpp binary APIs comprise a client
|
||||
The point of the exercise is to send binary API messages to VPP, and
|
||||
to receive replies from VPP. Many VPP binary APIs comprise a client
|
||||
request message, and a simple status reply. For example, to
|
||||
set the admin status of an interface, one codes:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
vl_api_sw_interface_set_flags_t *mp;
|
||||
|
||||
mp = vl_msg_api_alloc (sizeof (*mp));
|
||||
@ -262,9 +265,9 @@ Key points:
|
||||
network byte order
|
||||
|
||||
* The client-library global data structure @ref api_main keeps track
|
||||
of sufficient pointers and handles used to communicate with vpp
|
||||
of sufficient pointers and handles used to communicate with VPP
|
||||
|
||||
## Receiving binary API messages from vpp
|
||||
## Receiving binary API messages from VPP
|
||||
|
||||
Unless you've made other arrangements (see @ref
|
||||
vl_client_connect_to_vlib_no_rx_pthread), *messages are received on a
|
||||
@ -273,7 +276,7 @@ thread is the responsibility of the application!
|
||||
|
||||
Set up message handlers about as follows:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
#define vl_typedefs /* define message structures */
|
||||
#include <vpp/api/vpe_all_api_h.h>
|
||||
#undef vl_typedefs
|
||||
@ -319,7 +322,7 @@ vectors in the @ref api_main_t structure. As of this writing: not all
|
||||
vector element values can be set through the API. You'll see sporadic
|
||||
API message registrations followed by minor adjustments of this form:
|
||||
|
||||
```
|
||||
```{.c}
|
||||
/*
|
||||
* Thread-safe API messages
|
||||
*/
|
||||
|
@ -115,7 +115,8 @@ _(BD_ID_EXCEED_MAX, -122, "Bridge domain ID exceed 16M limit") \
|
||||
_(SUBIF_DOESNT_EXIST, -123, "Subinterface doesn't exist") \
|
||||
_(L2_MACS_EVENT_CLINET_PRESENT, -124, "Client already exist for L2 MACs events") \
|
||||
_(INVALID_QUEUE, -125, "Invalid queue") \
|
||||
_(UNSUPPORTED, -126, "Unsupported")
|
||||
_(UNSUPPORTED, -126, "Unsupported") \
|
||||
_(DUPLICATE_IF_ADDRESS, -127, "Address already present on another interface")
|
||||
|
||||
typedef enum
|
||||
{
|
||||
|
@ -173,6 +173,7 @@ typedef struct
|
||||
u8 l2_len; /* ethernet header length */
|
||||
u8 shg; /* split-horizon group */
|
||||
u16 l2fib_sn; /* l2fib bd/int seq_num */
|
||||
u8 bd_age; /* aging enabled */
|
||||
} l2;
|
||||
|
||||
/* l2tpv3 softwire encap, only valid there */
|
||||
|
@ -125,6 +125,9 @@ send_ethernet_hello (cdp_main_t * cm, cdp_neighbor_t * n, int count)
|
||||
h0 = vlib_packet_template_get_packet
|
||||
(vm, &cm->packet_templates[n->packet_template_index], &bi0);
|
||||
|
||||
if (!h0)
|
||||
break;
|
||||
|
||||
/* Add the interface's ethernet source address */
|
||||
hw = vnet_get_sup_hw_interface (vnm, n->sw_if_index);
|
||||
|
||||
|
@ -86,6 +86,16 @@
|
||||
* The value 64 was obtained by testing (48 and 128 were not as good).
|
||||
*/
|
||||
#define VHOST_USER_RX_COPY_THRESHOLD 64
|
||||
/*
|
||||
* On the transmit side, we keep processing the buffers from vlib in the while
|
||||
* loop and prepare the copy order to be executed later. However, the static
|
||||
* array which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
|
||||
* entries. In order to not corrupt memory, we have to do the copy when the
|
||||
* static array reaches the copy threshold. We subtract 40 in case the code
|
||||
* goes into the inner loop for a maximum of 64k frames which may require
|
||||
* more array entries.
|
||||
*/
|
||||
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
|
||||
|
||||
#define UNIX_GET_FD(unixfd_idx) \
|
||||
(unixfd_idx != ~0) ? \
|
||||
@ -1545,8 +1555,10 @@ vhost_user_if_input (vlib_main_t * vm,
|
||||
* per packet. In case packets are bigger, we will just yeld at some point
|
||||
* in the loop and come back later. This is not an issue as for big packet,
|
||||
* processing cost really comes from the memory copy.
|
||||
* The assumption is that big packets will fit in 40 buffers.
|
||||
*/
|
||||
if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1))
|
||||
if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1 ||
|
||||
vum->cpus[thread_index].rx_buffers_len < 40))
|
||||
{
|
||||
u32 curr_len = vum->cpus[thread_index].rx_buffers_len;
|
||||
vum->cpus[thread_index].rx_buffers_len +=
|
||||
@ -1998,7 +2010,7 @@ vhost_user_tx (vlib_main_t * vm,
|
||||
|
||||
qid =
|
||||
VHOST_VRING_IDX_RX (*vec_elt_at_index
|
||||
(vui->per_cpu_tx_qid, vlib_get_thread_index ()));
|
||||
(vui->per_cpu_tx_qid, thread_index));
|
||||
rxvq = &vui->vrings[qid];
|
||||
if (PREDICT_FALSE (vui->use_tx_spinlock))
|
||||
vhost_user_vring_lock (vui, qid);
|
||||
@ -2210,6 +2222,27 @@ retry:
|
||||
}
|
||||
|
||||
n_left--; //At the end for error counting when 'goto done' is invoked
|
||||
|
||||
/*
|
||||
* Do the copy periodically to prevent
|
||||
* vum->cpus[thread_index].copy array overflow and corrupt memory
|
||||
*/
|
||||
if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
|
||||
{
|
||||
if (PREDICT_FALSE
|
||||
(vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
|
||||
copy_len, &map_hint)))
|
||||
{
|
||||
vlib_error_count (vm, node->node_index,
|
||||
VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
|
||||
}
|
||||
copy_len = 0;
|
||||
|
||||
/* give buffers back to driver */
|
||||
CLIB_MEMORY_BARRIER ();
|
||||
rxvq->used->idx = rxvq->last_used_idx;
|
||||
vhost_user_log_dirty_ring (vui, rxvq, idx);
|
||||
}
|
||||
buffers++;
|
||||
}
|
||||
|
||||
@ -2264,7 +2297,7 @@ done3:
|
||||
vlib_increment_simple_counter
|
||||
(vnet_main.interface_main.sw_if_counters
|
||||
+ VNET_INTERFACE_COUNTER_DROP,
|
||||
vlib_get_thread_index (), vui->sw_if_index, n_left);
|
||||
thread_index, vui->sw_if_index, n_left);
|
||||
}
|
||||
|
||||
vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
|
||||
|
@ -615,7 +615,7 @@ vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm,
|
||||
*/
|
||||
if (0 == memcmp (e->ethernet_address,
|
||||
a->ethernet, sizeof (e->ethernet_address)))
|
||||
return -1;
|
||||
goto check_customers;
|
||||
|
||||
/* Update time stamp and ethernet address. */
|
||||
clib_memcpy (e->ethernet_address, a->ethernet,
|
||||
@ -630,6 +630,7 @@ vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm,
|
||||
|
||||
adj_nbr_walk_nh4 (sw_if_index, &e->ip4_address, arp_mk_complete_walk, e);
|
||||
|
||||
check_customers:
|
||||
/* Customer(s) waiting for this address to be resolved? */
|
||||
p = hash_get (am->pending_resolutions_by_address, a->ip4.as_u32);
|
||||
if (p)
|
||||
|
@ -2168,6 +2168,8 @@ fib_path_encode (fib_node_index_t path_list_index,
|
||||
case FIB_PATH_TYPE_SPECIAL:
|
||||
break;
|
||||
case FIB_PATH_TYPE_DEAG:
|
||||
api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
|
||||
api_rpath->dpo = path->fp_dpo;
|
||||
break;
|
||||
case FIB_PATH_TYPE_RECURSIVE:
|
||||
api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
|
||||
|
@ -281,27 +281,38 @@ static void
|
||||
(vl_api_sw_interface_add_del_address_t * mp)
|
||||
{
|
||||
vlib_main_t *vm = vlib_get_main ();
|
||||
vnet_main_t *vnm = vnet_get_main ();
|
||||
vl_api_sw_interface_add_del_address_reply_t *rmp;
|
||||
int rv = 0;
|
||||
u32 is_del;
|
||||
clib_error_t *error = 0;
|
||||
|
||||
VALIDATE_SW_IF_INDEX (mp);
|
||||
|
||||
is_del = mp->is_add == 0;
|
||||
vnm->api_errno = 0;
|
||||
|
||||
if (mp->del_all)
|
||||
ip_del_all_interface_addresses (vm, ntohl (mp->sw_if_index));
|
||||
else if (mp->is_ipv6)
|
||||
ip6_add_del_interface_address (vm, ntohl (mp->sw_if_index),
|
||||
(void *) mp->address,
|
||||
mp->address_length, is_del);
|
||||
error = ip6_add_del_interface_address (vm, ntohl (mp->sw_if_index),
|
||||
(void *) mp->address,
|
||||
mp->address_length, is_del);
|
||||
else
|
||||
ip4_add_del_interface_address (vm, ntohl (mp->sw_if_index),
|
||||
(void *) mp->address,
|
||||
mp->address_length, is_del);
|
||||
error = ip4_add_del_interface_address (vm, ntohl (mp->sw_if_index),
|
||||
(void *) mp->address,
|
||||
mp->address_length, is_del);
|
||||
|
||||
if (error)
|
||||
{
|
||||
rv = vnm->api_errno;
|
||||
clib_error_report (error);
|
||||
goto done;
|
||||
}
|
||||
|
||||
BAD_SW_IF_INDEX_LABEL;
|
||||
|
||||
done:
|
||||
REPLY_MACRO (VL_API_SW_INTERFACE_ADD_DEL_ADDRESS_REPLY);
|
||||
}
|
||||
|
||||
@ -864,6 +875,7 @@ static void vl_api_sw_interface_set_mac_address_t_handler
|
||||
vl_api_sw_interface_set_mac_address_reply_t *rmp;
|
||||
vnet_main_t *vnm = vnet_get_main ();
|
||||
u32 sw_if_index = ntohl (mp->sw_if_index);
|
||||
vnet_sw_interface_t *si;
|
||||
u64 mac;
|
||||
clib_error_t *error;
|
||||
int rv = 0;
|
||||
@ -877,7 +889,8 @@ static void vl_api_sw_interface_set_mac_address_t_handler
|
||||
| (u64) mp->mac_address[4] << (8 * 4)
|
||||
| (u64) mp->mac_address[5] << (8 * 5));
|
||||
|
||||
error = vnet_hw_interface_change_mac_address (vnm, sw_if_index, mac);
|
||||
si = vnet_get_sw_interface (vnm, sw_if_index);
|
||||
error = vnet_hw_interface_change_mac_address (vnm, si->hw_if_index, mac);
|
||||
if (error)
|
||||
{
|
||||
rv = VNET_API_ERROR_UNIMPLEMENTED;
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user