Thread safe internal buffer manager, take two

First attempt to make the internal buffer manager thread safe was not
successful, so trying again. This time with more testing.

Change-Id: I01b8385a9c26d233934a3339255ea4bd31c865ac
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2017-08-29 00:15:35 +02:00
committed by Dave Barach
parent 50958959b5
commit b6a8ed7fa0
3 changed files with 32 additions and 4 deletions

View File

@ -396,6 +396,8 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm,
hash_set (bm->free_list_by_size, f->n_data_bytes, f->index);
}
clib_spinlock_init (&f->global_buffers_lock);
for (i = 1; i < vec_len (vlib_mains); i++)
{
vlib_buffer_main_t *wbm = vlib_mains[i]->buffer_main;
@ -509,6 +511,7 @@ fill_free_list (vlib_main_t * vm,
vlib_buffer_free_list_t * fl, uword min_free_buffers)
{
vlib_buffer_t *buffers, *b;
vlib_buffer_free_list_t *mfl;
int n, n_bytes, i;
u32 *bi;
u32 n_remaining, n_alloc, n_this_chunk;
@ -518,6 +521,22 @@ fill_free_list (vlib_main_t * vm,
if (n <= 0)
return min_free_buffers;
mfl = vlib_buffer_get_free_list (vlib_mains[0], fl->index);
if (vec_len (mfl->global_buffers) > 0)
{
int n_copy, n_left;
clib_spinlock_lock (&mfl->global_buffers_lock);
n_copy = clib_min (vec_len (mfl->global_buffers), n);
n_left = vec_len (mfl->global_buffers) - n_copy;
vec_add_aligned (fl->buffers, mfl->global_buffers + n_left, n_copy,
CLIB_CACHE_LINE_BYTES);
_vec_len (mfl->global_buffers) = n_left;
clib_spinlock_unlock (&mfl->global_buffers_lock);
n = min_free_buffers - vec_len (fl->buffers);
if (n <= 0)
return min_free_buffers;
}
/* Always allocate round number of buffers. */
n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));

View File

@ -350,6 +350,12 @@ typedef struct vlib_buffer_free_list_t
/* Vector of free buffers. Each element is a byte offset into I/O heap. */
u32 *buffers;
/* global vector of free buffers, used only on main thread.
Buffers are returned to global buffers only in the case when the number of
buffers on the free buffer list grows above the threshold */
u32 *global_buffers;
clib_spinlock_t global_buffers_lock;
/* Memory chunks allocated for this free list
recorded here so they can be freed when free list
is deleted. */

View File

@ -848,18 +848,21 @@ vlib_buffer_add_to_free_list (vlib_main_t * vm,
u32 buffer_index, u8 do_init)
{
vlib_buffer_t *b;
b = vlib_get_buffer (vm, buffer_index);
if (PREDICT_TRUE (do_init))
vlib_buffer_init_for_free_list (b, f);
vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);
/* Once this (per-worker) free list grows past the threshold, hand one
frame's worth of buffer indices back to the main thread's shared pool
so other workers can refill from it (see fill_free_list). */
if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
{
vlib_buffer_free_list_t *mf;
mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
clib_spinlock_lock (&mf->global_buffers_lock);
/* keep last stored buffers, as they are more likely hot in the cache;
move the oldest VLIB_FRAME_SIZE indices (head of the vector) to the
global pool — no memory is freed here, only index ownership moves */
vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
CLIB_CACHE_LINE_BYTES);
vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
clib_spinlock_unlock (&mf->global_buffers_lock);
}
}