CSIT-844: fix binary api rx pthread heap push/pop

We need to push/pop the rx pthread's heap without affecting other
thread(s).

Search clib_per_cpu_mheaps for an unused slot, duplicate the main
thread's heap pointer in that slot, and set __os_thread_index
accordingly.
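
For background, a minimal sketch (not part of this patch) of the
push/pop that the private slot makes safe. It assumes the stock
vppinfra behaviour that clib_mem_set_heap() reads and writes
clib_per_cpu_mheaps[__os_thread_index]; the function and parameter
names below are illustrative:

#include <vppinfra/mem.h>

/* Sketch only: once the rx pthread owns a private slot in
 * clib_per_cpu_mheaps, switching heaps touches nothing but
 * clib_per_cpu_mheaps[__os_thread_index], leaving the main
 * thread's slot 0 untouched. */
static void
handle_msg_on_heap (void *msg_heap, void (*handler) (void *), void *msg)
{
  void *oldheap = clib_mem_set_heap (msg_heap); /* push */
  handler (msg);               /* runs with msg_heap as this thread's heap */
  clib_mem_set_heap (oldheap); /* pop: restore the previous heap */
}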

Don't bail out of vpp_api_test with results pending, e.g. at the end
of a vpp_api_test script. Even though vpp will eventually
garbage-collect them, we don't want to leave allocated reply messages
lurking in the api message allocation rings...
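
The new spin-wait (see the vpp_api_test hunk below) leans on the
usual VAT reply-handler convention, sketched here with an
illustrative message type:

/* Sketch only: typical shape of a VAT reply handler.
 * vl_api_example_reply_t is an illustrative name, not a real message. */
static void
vl_api_example_reply_t_handler (vl_api_example_reply_t * mp)
{
  vat_main_t *vam = &vat_main;

  vam->retval = ntohl (mp->retval);
  vam->result_ready = 1;	/* releases the wait loop in main() */
}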

This patch is a manual cherry-pick from gerrit 8862.

Change-Id: If3c84abe61496905432bfa36767068bad4bd243b
Signed-off-by: Dave Barach <dave@barachs.net>
Authored by Dave Barach on 2017-10-18 08:03:14 -04:00; committed by Florin Coras
parent 7513c8c7d2
commit fb353ce54e
2 changed files with 35 additions and 3 deletions

src/vat/main.c

@@ -297,6 +297,7 @@ main (int argc, char **argv)
   u8 *heap;
   mheap_t *h;
   int i;
+  f64 timeout;
 
   clib_mem_init (0, 128 << 20);
@@ -408,6 +409,18 @@ main (int argc, char **argv)
       fclose (vam->ifp);
     }
 
+  /*
+   * Particularly when running a script, don't be in a hurry to leave.
+   * A reply message queued to this process will end up constipating
+   * the allocation rings.
+   */
+  timeout = vat_time_now (vam) + 2.0;
+
+  while (vam->result_ready == 0 && vat_time_now (vam) < timeout)
+    ;
+  if (vat_time_now (vam) > timeout)
+    clib_warning ("BUG: message reply spin-wait timeout");
+
   vl_client_disconnect_from_vlib ();
   exit (0);
 }

src/vlibmemory/memory_client.c

@@ -80,6 +80,7 @@ rx_thread_fn (void *arg)
   unix_shared_memory_queue_t *q;
   memory_client_main_t *mm = &memory_client_main;
   api_main_t *am = &api_main;
+  int i;
 
   q = am->vl_input_queue;
@ -87,10 +88,27 @@ rx_thread_fn (void *arg)
if (setjmp (mm->rx_thread_jmpbuf) == 0)
{
mm->rx_thread_jmpbuf_valid = 1;
while (1)
/*
* Find an unused slot in the per-cpu-mheaps array,
* and grab it for this thread. We need to be able to
* push/pop the thread heap without affecting other thread(s).
*/
if (__os_thread_index == 0)
{
vl_msg_api_queue_handler (q);
for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
{
if (clib_per_cpu_mheaps[i] == 0)
{
/* Copy the main thread mheap pointer */
clib_per_cpu_mheaps[i] = clib_per_cpu_mheaps[0];
__os_thread_index = i;
break;
}
}
ASSERT (__os_thread_index > 0);
}
while (1)
vl_msg_api_queue_handler (q);
}
pthread_exit (0);
}
@@ -138,7 +156,7 @@ vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp)
   /* Recreate the vnet-side API message handler table */
   tblv = uword_to_pointer (mp->message_table, u8 *);
-  serialize_open_vector (sm, tblv);
+  unserialize_open_data (sm, tblv, vec_len (tblv));
   unserialize_integer (sm, &nmsgs, sizeof (u32));
 
   for (i = 0; i < nmsgs; i++)
@@ -311,6 +329,7 @@ vl_client_disconnect (void)
       /* drain the queue */
       if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY)
         {
+          clib_warning ("queue drain: %d", ntohs (rp->_vl_msg_id));
           vl_msg_api_handler ((void *) rp);
           continue;
         }