vapi: support api clients within vpp process

Add vapi_connect_from_vpp() and vapi_disconnect_from_vpp()
calls to allow API clients to run within the VPP process.

Add a new memclnt_create version that gives the user a
knob to enable or disable dead client scans (keepalive).

Type: feature
Signed-off-by: Ole Troan <ot@cisco.com>
Change-Id: Id0b7bb89308db3a3aed2d3fcbedf4e1282dcd03f
Author: Ole Troan
Date:   2022-01-27 16:25:43 +01:00
Parent: 6a2868734c
Commit: 2ca88ff978
9 changed files with 675 additions and 60 deletions


@@ -15,9 +15,10 @@ set(chacha20_poly1305)
if (OPENSSL_VERSION VERSION_GREATER_EQUAL 1.1.0)
set(chacha20_poly1305 crypto/chacha20_poly1305.c)
endif()
include_directories(${CMAKE_SOURCE_DIR}/vpp-api ${CMAKE_CURRENT_BINARY_DIR}/../../vpp-api)
add_vpp_plugin(unittest
SOURCES
api_test.c
api_fuzz_test.c
bier_test.c
bihash_test.c
@@ -60,4 +61,5 @@ add_vpp_plugin(unittest
COMPONENT
vpp-plugin-devtools
LINK_LIBRARIES vapiclient
)
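(Linking the unittest plugin against vapiclient, together with the vpp-api include paths added above, is what makes the VAPI C binding available inside the VPP process for the new api_test.c.)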


@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright(c) 2022 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vlibapi/api.h>
#include <vlibmemory/api.h>
#include <vpp/app/version.h>
#include <stdbool.h>
#include <vapi/vapi.h>
#include <vapi/memclnt.api.vapi.h>
#include <vapi/vlib.api.vapi.h>
#include <vapi/vpe.api.vapi.h>
/*
* Example of how to call the VPP binary API from an internal API client,
* using the VAPI C language binding.
*/
DEFINE_VAPI_MSG_IDS_VPE_API_JSON;
/*
* Connect a VPP binary API client to the VPP API.
*/
static vapi_ctx_t
connect_to_vpp (void)
{
vapi_ctx_t ctx;
if (vapi_ctx_alloc (&ctx) != VAPI_OK)
{
clib_warning ("ctx_alloc failed");
return 0;
}
if (vapi_connect_from_vpp (ctx, "apifromplugin", 64, 32, VAPI_MODE_BLOCKING,
true) != VAPI_OK)
{
clib_warning ("vapi_connect failed");
return 0;
}
return ctx;
}
/*
* Gets called when the show_version_reply message is received
*/
vapi_error_e
show_version_cb (vapi_ctx_t ctx, void *caller_ctx, vapi_error_e rv,
bool is_last, vapi_payload_show_version_reply *p)
{
if (rv != VAPI_OK)
{
/* Don't touch the payload on error; it may not be valid */
clib_warning ("show_version callback error: %d", rv);
return rv;
}
fformat (
stdout,
"show_version_reply: program: `%s', version: `%s', build directory: "
"`%s', build date: `%s'\n",
p->program, p->version, p->build_directory, p->build_date);
return VAPI_OK;
}
static void *
api_show_version_blocking_fn (void *args)
{
vapi_ctx_t ctx;
if ((ctx = connect_to_vpp ()) == 0)
return clib_error_return (0, "API connection failed");
int called;
vapi_msg_show_version *sv = vapi_alloc_show_version (ctx);
vapi_error_e vapi_rv = vapi_show_version (ctx, sv, show_version_cb, &called);
if (vapi_rv != VAPI_OK)
clib_warning ("call failed");
vapi_disconnect_from_vpp (ctx);
vapi_ctx_free (ctx);
return 0;
}
static clib_error_t *
test_api_test_command_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
{
/* Run call in a pthread */
pthread_t thread;
int rv = pthread_create (&thread, NULL, api_show_version_blocking_fn, 0);
if (rv)
{
return clib_error_return (0, "API call failed");
}
return 0;
}
VLIB_CLI_COMMAND (test_api_command, static) = {
.path = "test api internal",
.short_help = "test internal api client",
.function = test_api_test_command_fn,
};
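For reference, the command registered above is driven from the VPP CLI (the Python test at the end of this commit invokes it the same way, via vapi.cli()):

vpp# test api internal

On success, show_version_cb() prints a single show_version_reply line to stdout; the CLI command itself returns immediately, since the blocking API call runs in its own pthread.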


@@ -75,6 +75,8 @@ typedef struct vl_api_registration_
/* socket client only */
u32 server_handle; /**< Socket client only: server handle */
u32 server_index; /**< Socket client only: server index */
bool keepalive; /**< Include this client in the dead client scan */
} vl_api_registration_t;
#define VL_API_INVALID_FI ((u32)~0)


@@ -232,3 +232,19 @@ define control_ping_reply
u32 vpe_pid;
};
define memclnt_create_v2 {
u32 context; /* opaque value to be returned in the reply */
i32 ctx_quota; /* requested punt context quota */
u64 input_queue; /* client's queue */
string name[64]; /* for show, find by name, whatever */
u32 api_versions[8]; /* client-server pairs use as desired */
bool keepalive[default=true]; /* dead client scan keepalives */
};
define memclnt_create_v2_reply {
u32 context; /* opaque value from the create request */
i32 response; /* Non-negative = success */
u64 handle; /* handle by which vlib knows this client */
u32 index; /* index, used e.g. by API trace replay */
u64 message_table; /* serialized message table in shmem */
};
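For orientation, here is a rough sketch of how a shared-memory client could fill in the new request. This is illustrative only, not code from this commit: `q` (the client's reply queue) and `shmem_hdr` (the mapped vl_shmem_hdr_t) are assumed to already exist, and the VAPI library normally does all of this internally.

  /* Sketch: request an in-process registration with keepalives disabled */
  vl_api_memclnt_create_v2_t *mp = vl_msg_api_alloc (sizeof (*mp));
  clib_memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2);
  mp->context = 0xfeedface;	     /* opaque, echoed back in the reply */
  mp->input_queue = (u64) (uword) q; /* where VPP queues replies (assumed) */
  strncpy ((char *) mp->name, "in-proc-client", sizeof (mp->name) - 1);
  mp->keepalive = false; /* in-process client: opt out of dead client scan */
  vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) &mp);

With keepalive left at its default of true, existing clients are scanned exactly as before.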


@@ -192,6 +192,7 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
regp->name = format (0, "%s", mp->name);
vec_add1 (regp->name, 0);
regp->keepalive = true;
if (am->serialized_message_table_in_shmem == 0)
am->serialized_message_table_in_shmem =
@@ -217,6 +218,87 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
vl_msg_api_send_shmem (q, (u8 *) & rp);
}
void
vl_api_memclnt_create_v2_t_handler (vl_api_memclnt_create_v2_t *mp)
{
vl_api_registration_t **regpp;
vl_api_registration_t *regp;
vl_api_memclnt_create_v2_reply_t *rp;
svm_queue_t *q;
int rv = 0;
void *oldheap;
api_main_t *am = vlibapi_get_main ();
u8 *msg_table;
/*
* This is tortured. Maintain a vlib-address-space private
* pool of client registrations. We use the shared-memory virtual
* address of client structure as a handle, to allow direct
* manipulation of context quota vbls from the client library.
*
* This scheme causes trouble w/ API message trace replay, since
* some random VA from clib_mem_alloc() certainly won't
* occur in the Linux sim. The (very) few places
* that care need to use the pool index.
*
* Putting the registration object(s) into a pool in shared memory and
* using the pool index as a handle seems like a great idea.
* Unfortunately, each and every reference to that pool would need
* to be protected by a mutex:
*
* Client VLIB
* ------ ----
* convert pool index to
* pointer.
* <deschedule>
* expand pool
* <deschedule>
* kaboom!
*/
pool_get (am->vl_clients, regpp);
oldheap = vl_msg_push_heap ();
*regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
regp = *regpp;
clib_memset (regp, 0, sizeof (*regp));
regp->registration_type = REGISTRATION_TYPE_SHMEM;
regp->vl_api_registration_pool_index = regpp - am->vl_clients;
regp->vlib_rp = am->vlib_rp;
regp->shmem_hdr = am->shmem_hdr;
regp->clib_file_index = am->shmem_hdr->clib_file_index;
q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
VL_MSG_API_SVM_QUEUE_UNPOISON (q);
regp->name = format (0, "%s", mp->name);
vec_add1 (regp->name, 0);
regp->keepalive = mp->keepalive;
if (am->serialized_message_table_in_shmem == 0)
am->serialized_message_table_in_shmem =
vl_api_serialize_message_table (am, 0);
if (am->vlib_rp != am->vlib_primary_rp)
msg_table = vl_api_serialize_message_table (am, 0);
else
msg_table = am->serialized_message_table_in_shmem;
vl_msg_pop_heap (oldheap);
rp = vl_msg_api_alloc (sizeof (*rp));
rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2_REPLY);
rp->handle = (uword) regp;
rp->index = vl_msg_api_handle_from_index_and_epoch (
regp->vl_api_registration_pool_index, am->shmem_hdr->application_restarts);
rp->context = mp->context;
rp->response = ntohl (rv);
rp->message_table = pointer_to_uword (msg_table);
vl_msg_api_send_shmem (q, (u8 *) &rp);
}
void
vl_api_call_reaper_functions (u32 client_index)
{
@@ -399,6 +481,7 @@ vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
#define foreach_vlib_api_msg \
_ (MEMCLNT_CREATE, memclnt_create, 0) \
_ (MEMCLNT_CREATE_V2, memclnt_create_v2, 0) \
_ (MEMCLNT_DELETE, memclnt_delete, 0) \
_ (MEMCLNT_KEEPALIVE, memclnt_keepalive, 0) \
_ (MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
@@ -578,8 +661,10 @@ vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
/* *INDENT-OFF* */
pool_foreach (regpp, am->vl_clients) {
if (!(*regpp)->keepalive)
continue;
vl_mem_send_client_keepalive_w_reg (am, now, regpp, &dead_indices,
&confused_indices);
}
/* *INDENT-ON* */
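Registrations created with keepalive disabled (e.g. via memclnt_create_v2 above) are now skipped by the dead client scan; presumably an in-process client cannot usefully be liveness-probed by the very process it lives in, and must not be reaped as dead while its thread is busy inside VPP.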
@@ -944,7 +1029,7 @@ vl_api_client_index_to_input_queue (u32 index)
static clib_error_t *
setup_memclnt_exit (vlib_main_t * vm)
{
- atexit (vl_unmap_shmem);
+ atexit (vl_unmap_shmem_client);
return 0;
}


@@ -1624,24 +1624,28 @@ interface_api_hookup (vlib_main_t * vm)
{
api_main_t *am = vlibapi_get_main ();
- /* Mark these APIs as mp safe */
- am->is_mp_safe[VL_API_SW_INTERFACE_DUMP] = 1;
- am->is_mp_safe[VL_API_SW_INTERFACE_DETAILS] = 1;
- am->is_mp_safe[VL_API_SW_INTERFACE_TAG_ADD_DEL] = 1;
- am->is_mp_safe[VL_API_SW_INTERFACE_SET_INTERFACE_NAME] = 1;
- /* Do not replay VL_API_SW_INTERFACE_DUMP messages */
- am->api_trace_cfg[VL_API_SW_INTERFACE_DUMP].replay_enable = 0;
- /* Mark these APIs as autoendian */
- am->is_autoendian[VL_API_SW_INTERFACE_SET_TX_PLACEMENT] = 1;
- am->is_autoendian[VL_API_SW_INTERFACE_TX_PLACEMENT_GET] = 1;
/*
* Set up the (msg_name, crc, message-id) table
*/
REPLY_MSG_ID_BASE = setup_message_id_table ();
+ /* Mark these APIs as mp safe */
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_DUMP] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_DETAILS] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_TAG_ADD_DEL] = 1;
+ am->is_mp_safe[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_SET_INTERFACE_NAME] =
+ 1;
+ /* Do not replay VL_API_SW_INTERFACE_DUMP messages */
+ am->api_trace_cfg[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_DUMP]
+ .replay_enable = 0;
+ /* Mark these APIs as autoendian */
+ am->is_autoendian[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_SET_TX_PLACEMENT] =
+ 1;
+ am->is_autoendian[REPLY_MSG_ID_BASE + VL_API_SW_INTERFACE_TX_PLACEMENT_GET] =
+ 1;
return 0;
}
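The relocation is the substance of this hunk: once the per-message flags are indexed with REPLY_MSG_ID_BASE, they can only be set after setup_message_id_table() has returned the runtime-assigned base for this module's message IDs.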

(File diff suppressed because it is too large.)


@@ -44,7 +44,7 @@ extern "C"
* process). It's not recommended to mix the higher and lower level APIs. Due
* to version issues, the higher-level APIs are not part of the shared library.
*/
typedef struct vapi_ctx_s *vapi_ctx_t;
/**
* @brief allocate vapi message of given size
@@ -56,7 +56,7 @@ extern "C"
*
* @return pointer to message or NULL if out of memory
*/
void *vapi_msg_alloc (vapi_ctx_t ctx, size_t size);
/**
* @brief free a vapi message
@@ -66,7 +66,7 @@ extern "C"
* @param ctx opaque vapi context
* @param msg message to be freed
*/
void vapi_msg_free (vapi_ctx_t ctx, void *msg);
/**
* @brief allocate vapi context
@@ -75,18 +75,18 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
vapi_error_e vapi_ctx_alloc (vapi_ctx_t *result);
/**
* @brief free vapi context
*/
void vapi_ctx_free (vapi_ctx_t ctx);
/**
* @brief check if message identified by its message id is known by the vpp to
* which the connection is open
*/
bool vapi_is_msg_available (vapi_ctx_t ctx, vapi_msg_id_t type);
/**
* @brief connect to vpp
@@ -101,11 +101,30 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
vapi_error_e vapi_connect (vapi_ctx_t ctx, const char *name,
const char *chroot_prefix,
int max_outstanding_requests,
int response_queue_size, vapi_mode_e mode,
bool handle_keepalives);
/**
* @brief connect to vpp from a client in same process
* @remark This MUST be called from a separate thread. If called
* from the main thread, it will deadlock.
*
* @param ctx opaque vapi context, must be allocated using vapi_ctx_alloc first
* @param name application name
* @param max_outstanding_requests max number of outstanding requests queued
* @param response_queue_size size of the response queue
* @param mode mode of operation - blocking or nonblocking
* @param handle_keepalives - if true, automatically handle memclnt_keepalive
*
* @return VAPI_OK on success, other error code on error
*/
vapi_error_e vapi_connect_from_vpp (vapi_ctx_t ctx, const char *name,
int max_outstanding_requests,
int response_queue_size, vapi_mode_e mode,
bool handle_keepalives);
/**
* @brief disconnect from vpp
@@ -114,7 +133,8 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
vapi_error_e vapi_disconnect (vapi_ctx_t ctx);
vapi_error_e vapi_disconnect_from_vpp (vapi_ctx_t ctx);
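A context connected with vapi_connect_from_vpp() should be torn down with vapi_disconnect_from_vpp() rather than vapi_disconnect(). A minimal sketch of the pairing, run from its own pthread per the remark above (error handling abbreviated; the name "inproc" and the queue sizes are arbitrary):

  vapi_ctx_t ctx;
  if (vapi_ctx_alloc (&ctx) == VAPI_OK &&
      vapi_connect_from_vpp (ctx, "inproc", 64, 32, VAPI_MODE_BLOCKING,
			     true) == VAPI_OK)
    {
      /* ... send requests, dispatch replies ... */
      vapi_disconnect_from_vpp (ctx); /* not vapi_disconnect () */
    }
  vapi_ctx_free (ctx);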
/**
* @brief get event file descriptor
@@ -127,7 +147,7 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
vapi_error_e vapi_get_fd (vapi_ctx_t ctx, int *fd);
/**
* @brief low-level api for sending messages to vpp
@@ -140,7 +160,7 @@ extern "C"
*
* @return VAPI_OK on success, other error code on error
*/
vapi_error_e vapi_send (vapi_ctx_t ctx, void *msg);
/**
* @brief low-level api for atomically sending two messages to vpp - either

test/test_api_client.py (new file, 20 lines)

@@ -0,0 +1,20 @@
#!/usr/bin/env python3
import unittest
from framework import VppTestCase, VppTestRunner


class TestAPIClient(VppTestCase):
    """ API Internal client Test Cases """

    def test_client_unittest(self):
        """ Internal API client """
        error = self.vapi.cli("test api internal")
        if error:
            self.logger.critical(error)
            self.assertNotIn('failed', error)


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)