api: avoid swapping vlib_rp before barrier sync

Type: fix

Change-Id: I9868d13e827c6f5aa5535a38f629efb62ff12dbc
Signed-off-by: Nathan Skrzypczak <nathan.skrzypczak@gmail.com>
Signed-off-by: Florin Coras <fcoras@cisco.com>
Florin Coras, 2019-11-27 09:15:25 -08:00 (committed by Dave Barach)
parent 157a4ab404
commit 8d820857d9
5 changed files with 65 additions and 65 deletions
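In short: the private-region message path used to swap am->vlib_rp and am->shmem_hdr before calling vl_msg_api_handler_with_vm_node, i.e. before that function's barrier sync for non-mp-safe messages. The handler now receives the region and an is_private flag, performs the swap itself only after vl_msg_api_barrier_sync (), and restores the globals before vl_msg_api_barrier_release (). A condensed before/after sketch of the private path, using only names that appear in the hunks below (not literal code from either side of the diff):

  /* before: globals swapped by the caller, prior to the barrier sync
   * taken inside the handler for non-mp-safe messages */
  am->vlib_rp = am->vlib_private_rps[reg_index];
  am->shmem_hdr = (void *) am->vlib_rp->user_ctx;
  vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);

  /* after: the region travels as an argument; the handler takes the
   * barrier first, then swaps am->vlib_rp / am->shmem_hdr, and restores
   * them before releasing the barrier */
  vl_msg_api_handler_with_vm_node (am, am->vlib_private_rps[reg_index],
                                   (void *) mp, vm, node, 1 /* is_private */ );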

View File

@@ -105,9 +105,10 @@ int vl_msg_api_trace_onoff (api_main_t * am, vl_api_trace_which_t which,
int vl_msg_api_trace_free (api_main_t * am, vl_api_trace_which_t which);
int vl_msg_api_trace_configure (api_main_t * am, vl_api_trace_which_t which,
u32 nitems);
void vl_msg_api_handler_with_vm_node (api_main_t * am,
void vl_msg_api_handler_with_vm_node (api_main_t * am, svm_region_t * vlib_rp,
void *the_msg, vlib_main_t * vm,
vlib_node_runtime_t * node);
vlib_node_runtime_t * node,
u8 is_private);
vl_api_trace_t *vl_msg_api_trace_get (api_main_t * am,
vl_api_trace_which_t which);
void vl_msg_api_add_msg_name_crc (api_main_t * am, const char *string,
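Consolidated, the updated declaration reads as follows (the hunk above shows it split across removed and re-wrapped lines); vlib_rp is the shared-memory region the message was allocated from, and is_private selects the deferred swap described above:

  void vl_msg_api_handler_with_vm_node (api_main_t * am, svm_region_t * vlib_rp,
                                        void *the_msg, vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        u8 is_private);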

View File

@@ -525,13 +525,15 @@ msg_handler_internal (api_main_t * am,
/* This is only to be called from a vlib/vnet app */
void
vl_msg_api_handler_with_vm_node (api_main_t * am,
vl_msg_api_handler_with_vm_node (api_main_t * am, svm_region_t * vlib_rp,
void *the_msg, vlib_main_t * vm,
vlib_node_runtime_t * node)
vlib_node_runtime_t * node, u8 is_private)
{
u16 id = clib_net_to_host_u16 (*((u16 *) the_msg));
u8 *(*handler) (void *, void *, void *);
u8 *(*print_fp) (void *, void *);
svm_region_t *old_vlib_rp;
void *save_shmem_hdr;
int is_mp_safe = 1;
if (PREDICT_FALSE (am->elog_trace_api_messages))
@@ -581,7 +583,19 @@ vl_msg_api_handler_with_vm_node (api_main_t * am,
vl_msg_api_barrier_trace_context (am->msg_names[id]);
vl_msg_api_barrier_sync ();
}
if (is_private)
{
old_vlib_rp = am->vlib_rp;
save_shmem_hdr = am->shmem_hdr;
am->vlib_rp = vlib_rp;
am->shmem_hdr = (void *) vlib_rp->user_ctx;
}
(*handler) (the_msg, vm, node);
if (is_private)
{
am->vlib_rp = old_vlib_rp;
am->shmem_hdr = save_shmem_hdr;
}
if (!is_mp_safe)
vl_msg_api_barrier_release ();
}

View File

@@ -503,10 +503,8 @@ static void
send_memclnt_keepalive (vl_api_registration_t * regp, f64 now)
{
vl_api_memclnt_keepalive_t *mp;
svm_queue_t *q;
api_main_t *am = &api_main;
svm_region_t *save_vlib_rp = am->vlib_rp;
vl_shmem_hdr_t *save_shmem_hdr = am->shmem_hdr;
svm_queue_t *q;
q = regp->vl_input_queue;
@@ -530,10 +528,7 @@ send_memclnt_keepalive (vl_api_registration_t * regp, f64 now)
* memory clients..
*/
am->vlib_rp = regp->vlib_rp;
am->shmem_hdr = regp->shmem_hdr;
mp = vl_msg_api_alloc (sizeof (*mp));
mp = vl_mem_api_alloc_as_if_client_w_reg (regp, sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_MEMCLNT_KEEPALIVE);
mp->context = mp->client_index =
@@ -545,10 +540,7 @@ send_memclnt_keepalive (vl_api_registration_t * regp, f64 now)
/* Failure-to-send due to a stuffed queue is absolutely expected */
if (svm_queue_add (q, (u8 *) & mp, 1 /* nowait */ ))
vl_msg_api_free (mp);
am->vlib_rp = save_vlib_rp;
am->shmem_hdr = save_shmem_hdr;
vl_msg_api_free_w_region (regp->vlib_rp, mp);
}
static void
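With the two new helpers, send_memclnt_keepalive no longer saves and restores am->vlib_rp / am->shmem_hdr around the allocation; it allocates directly from the client's region and, if the queue is stuffed, frees back to that same region. A minimal sketch of the resulting pattern against a client registration (the context/client_index assignment is elided):

  mp = vl_mem_api_alloc_as_if_client_w_reg (regp, sizeof (*mp));
  clib_memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_MEMCLNT_KEEPALIVE);

  /* failure to enqueue is expected for a wedged client; the message
   * goes back to the region it was allocated from */
  if (svm_queue_add (q, (u8 *) & mp, 1 /* nowait */ ))
    vl_msg_api_free_w_region (regp->vlib_rp, mp);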
@@ -708,14 +700,20 @@ vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
}
static inline int
void_mem_api_handle_msg_i (api_main_t * am, vlib_main_t * vm,
vlib_node_runtime_t * node, svm_queue_t * q)
void_mem_api_handle_msg_i (api_main_t * am, svm_region_t * vlib_rp,
vlib_main_t * vm, vlib_node_runtime_t * node,
u8 is_private)
{
svm_queue_t *q;
uword mp;
q = ((vl_shmem_hdr_t *) (void *) vlib_rp->user_ctx)->vl_input_queue;
if (!svm_queue_sub2 (q, (u8 *) & mp))
{
VL_MSG_API_UNPOISON ((void *) mp);
vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
vl_msg_api_handler_with_vm_node (am, vlib_rp, (void *) mp, vm, node,
is_private);
return 0;
}
return -1;
@@ -725,8 +723,8 @@ int
vl_mem_api_handle_msg_main (vlib_main_t * vm, vlib_node_runtime_t * node)
{
api_main_t *am = &api_main;
return void_mem_api_handle_msg_i (am, vm, node,
am->shmem_hdr->vl_input_queue);
return void_mem_api_handle_msg_i (am, am->vlib_rp, vm, node,
0 /* is_private */ );
}
int
@@ -764,7 +762,8 @@ vl_mem_api_handle_rpc (vlib_main_t * vm, vlib_node_runtime_t * node)
for (i = 0; i < vec_len (vm->processing_rpc_requests); i++)
{
mp = vm->processing_rpc_requests[i];
vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
vl_msg_api_handler_with_vm_node (am, am->vlib_rp, (void *) mp, vm,
node, 0 /* is_private */ );
}
vl_msg_api_barrier_release ();
}
@@ -777,22 +776,9 @@ vl_mem_api_handle_msg_private (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 reg_index)
{
api_main_t *am = &api_main;
vl_shmem_hdr_t *save_shmem_hdr = am->shmem_hdr;
svm_region_t *vlib_rp, *save_vlib_rp = am->vlib_rp;
svm_queue_t *q;
int rv;
vlib_rp = am->vlib_rp = am->vlib_private_rps[reg_index];
am->shmem_hdr = (void *) vlib_rp->user_ctx;
q = am->shmem_hdr->vl_input_queue;
rv = void_mem_api_handle_msg_i (am, vm, node, q);
am->shmem_hdr = save_shmem_hdr;
am->vlib_rp = save_vlib_rp;
return rv;
return void_mem_api_handle_msg_i (am, am->vlib_private_rps[reg_index], vm,
node, 1 /* is_private */ );
}
vl_api_registration_t *

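void_mem_api_handle_msg_i now derives the input queue from the region it is handed rather than from api_main, so vl_mem_api_handle_msg_private reduces to a single call and no longer touches the globals at all. Pieced together from the hunk above, the new helper is roughly:

  static inline int
  void_mem_api_handle_msg_i (api_main_t * am, svm_region_t * vlib_rp,
                             vlib_main_t * vm, vlib_node_runtime_t * node,
                             u8 is_private)
  {
    svm_queue_t *q;
    uword mp;

    /* the input queue lives in the shmem header stored in the
     * region's user_ctx */
    q = ((vl_shmem_hdr_t *) (void *) vlib_rp->user_ctx)->vl_input_queue;
    if (!svm_queue_sub2 (q, (u8 *) & mp))
      {
        VL_MSG_API_UNPOISON ((void *) mp);
        vl_msg_api_handler_with_vm_node (am, vlib_rp, (void *) mp, vm, node,
                                         is_private);
        return 0;
      }
    return -1;
  }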
View File

@@ -43,7 +43,8 @@
#define DEBUG_MESSAGE_BUFFER_OVERRUN 0
CLIB_NOSANITIZE_ADDR static inline void *
vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
vl_msg_api_alloc_internal (svm_region_t * vlib_rp, int nbytes, int pool,
int may_return_null)
{
int i;
msgbuf_t *rv;
@@ -53,7 +54,7 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
vl_shmem_hdr_t *shmem_hdr;
api_main_t *am = &api_main;
shmem_hdr = am->shmem_hdr;
shmem_hdr = (void *) vlib_rp->user_ctx;
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
nbytes += 4;
@@ -161,15 +162,15 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
*/
am->ring_misses++;
pthread_mutex_lock (&am->vlib_rp->mutex);
oldheap = svm_push_data_heap (am->vlib_rp);
pthread_mutex_lock (&vlib_rp->mutex);
oldheap = svm_push_data_heap (vlib_rp);
if (may_return_null)
{
rv = clib_mem_alloc_or_null (nbytes);
if (PREDICT_FALSE (rv == 0))
{
svm_pop_heap (oldheap);
pthread_mutex_unlock (&am->vlib_rp->mutex);
pthread_mutex_unlock (&vlib_rp->mutex);
return 0;
}
}
@@ -179,7 +180,7 @@ vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
rv->q = 0;
rv->gc_mark_timestamp = 0;
svm_pop_heap (oldheap);
pthread_mutex_unlock (&am->vlib_rp->mutex);
pthread_mutex_unlock (&vlib_rp->mutex);
out:
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
@@ -207,7 +208,8 @@ vl_msg_api_alloc (int nbytes)
* Clients use pool-0, vlib proc uses pool 1
*/
pool = (am->our_pid == shmem_hdr->vl_pid);
return vl_msg_api_alloc_internal (nbytes, pool, 0 /* may_return_null */ );
return vl_msg_api_alloc_internal (am->vlib_rp, nbytes, pool,
0 /* may_return_null */ );
}
void *
@@ -228,13 +230,15 @@ vl_msg_api_alloc_or_null (int nbytes)
vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
pool = (am->our_pid == shmem_hdr->vl_pid);
return vl_msg_api_alloc_internal (nbytes, pool, 1 /* may_return_null */ );
return vl_msg_api_alloc_internal (am->vlib_rp, nbytes, pool,
1 /* may_return_null */ );
}
void *
vl_msg_api_alloc_as_if_client (int nbytes)
{
return vl_msg_api_alloc_internal (nbytes, 0, 0 /* may_return_null */ );
return vl_msg_api_alloc_internal (api_main.vlib_rp, nbytes, 0,
0 /* may_return_null */ );
}
void *
@@ -250,34 +254,22 @@ vl_msg_api_alloc_zero_as_if_client (int nbytes)
void *
vl_msg_api_alloc_as_if_client_or_null (int nbytes)
{
return vl_msg_api_alloc_internal (nbytes, 0, 1 /* may_return_null */ );
return vl_msg_api_alloc_internal (api_main.vlib_rp, nbytes, 0,
1 /* may_return_null */ );
}
void *
vl_mem_api_alloc_as_if_client_w_reg (vl_api_registration_t * reg, int nbytes)
{
api_main_t *am = &api_main;
vl_shmem_hdr_t *save_shmem_hdr = am->shmem_hdr;
svm_region_t *vlib_rp, *save_vlib_rp = am->vlib_rp;
void *msg;
vlib_rp = am->vlib_rp = reg->vlib_rp;
am->shmem_hdr = (void *) vlib_rp->user_ctx;
msg = vl_msg_api_alloc_internal (nbytes, 0, 0 /* may_return_null */ );
am->shmem_hdr = save_shmem_hdr;
am->vlib_rp = save_vlib_rp;
return msg;
return vl_msg_api_alloc_internal (reg->vlib_rp, nbytes, 0,
0 /* may_return_null */ );
}
void
vl_msg_api_free (void *a)
vl_msg_api_free_w_region (svm_region_t * vlib_rp, void *a)
{
msgbuf_t *rv;
void *oldheap;
api_main_t *am = &api_main;
rv = (msgbuf_t *) (((u8 *) a) - offsetof (msgbuf_t, data));
@@ -301,8 +293,8 @@ vl_msg_api_free (void *a)
return;
}
pthread_mutex_lock (&am->vlib_rp->mutex);
oldheap = svm_push_data_heap (am->vlib_rp);
pthread_mutex_lock (&vlib_rp->mutex);
oldheap = svm_push_data_heap (vlib_rp);
#if DEBUG_MESSAGE_BUFFER_OVERRUN > 0
{
@@ -314,7 +306,13 @@ vl_msg_api_free (void *a)
clib_mem_free (rv);
svm_pop_heap (oldheap);
pthread_mutex_unlock (&am->vlib_rp->mutex);
pthread_mutex_unlock (&vlib_rp->mutex);
}
void
vl_msg_api_free (void *a)
{
vl_msg_api_free_w_region (api_main.vlib_rp, a);
}
static void

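The same pattern runs through the allocator: vl_msg_api_alloc_internal now receives the region explicitly, and when it misses the message rings it falls back to that region's heap under that region's mutex, rather than am->vlib_rp's. The public entry points simply pass the right region (api_main.vlib_rp for the shared-memory wrappers, reg->vlib_rp for vl_mem_api_alloc_as_if_client_w_reg). The ring-miss fallback, condensed from the hunks above (the must-succeed branch and the overrun-debug bookkeeping are omitted):

  /* no ring entry fits: allocate from this region's own heap */
  am->ring_misses++;

  pthread_mutex_lock (&vlib_rp->mutex);
  oldheap = svm_push_data_heap (vlib_rp);
  rv = clib_mem_alloc_or_null (nbytes);
  svm_pop_heap (oldheap);
  pthread_mutex_unlock (&vlib_rp->mutex);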
View File

@@ -117,6 +117,7 @@ void *vl_msg_api_alloc_as_if_client_or_null (int nbytes);
void *vl_mem_api_alloc_as_if_client_w_reg (vl_api_registration_t * reg,
int nbytes);
void vl_msg_api_free (void *a);
void vl_msg_api_free_w_region (svm_region_t * vlib_rp, void *a);
int vl_map_shmem (const char *region_name, int is_vlib);
void vl_unmap_shmem (void);
void vl_unmap_shmem_client (void);
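One usage note that falls out of the new declarations: a message taken from a client registration's private region should go back through the region-aware free, as send_memclnt_keepalive now does, while vl_msg_api_free () keeps its old behaviour and simply wraps vl_msg_api_free_w_region () with api_main.vlib_rp. A minimal sketch of the pairing:

  void *msg = vl_mem_api_alloc_as_if_client_w_reg (regp, nbytes);
  /* ... fill in the message, attempt to queue it to the client ... */
  vl_msg_api_free_w_region (regp->vlib_rp, msg);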