fib: add barrier sync, pool/vector expand cases

load_balance_alloc_i(...) is not thread-safe when the
load_balance_pool or combined counter vectors expand.
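
In outline, the fix applies the usual VPP pattern for a main-thread
writer whose allocation may move memory that worker threads are
reading: first ask whether the allocation would reallocate the
underlying vector, and take the worker-thread barrier only in that
case. A minimal sketch of the pattern, assuming the usual vlib
includes and using a hypothetical widget_pool for illustration (the
real change below does the same for load_balance_pool and the two
combined-counter vectors):

    /* Hypothetical pool element type, for illustration only. */
    typedef struct { u32 value; } widget_t;
    static widget_t *widget_pool;

    static widget_t *
    widget_alloc (vlib_main_t * vm)
    {
      widget_t *w;
      u8 need_barrier_sync = 0;

      /* Would pool_get_aligned() below reallocate (move) the pool? */
      pool_get_aligned_will_expand (widget_pool, need_barrier_sync,
                                    CLIB_CACHE_LINE_BYTES);
      if (need_barrier_sync)
        vlib_worker_thread_barrier_sync (vm);

      pool_get_aligned (widget_pool, w, CLIB_CACHE_LINE_BYTES);

      if (need_barrier_sync)
        vlib_worker_thread_barrier_release (vm);
      return w;
    }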

Type: fix

Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I7f295ed77350d1df0434d5ff461eedafe79131de
(cherry picked from commit 8341f76fd1)
 3 files changed, 66 insertions(+), 1 deletion(-)

src/vlib/counter.c

@@ -119,6 +119,44 @@ vlib_validate_combined_counter (vlib_combined_counter_main_t * cm, u32 index)
                        3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
 }
 
+int
+vlib_validate_combined_counter_will_expand
+  (vlib_combined_counter_main_t * cm, u32 index)
+{
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  int i;
+  void *oldheap = vlib_stats_push_heap (cm->counters);
+
+  /* Possibly once in recorded history */
+  if (PREDICT_FALSE (vec_len (cm->counters) == 0))
+    {
+      vlib_stats_pop_heap (cm, oldheap, index,
+                           3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+      return 1;
+    }
+
+  for (i = 0; i < tm->n_vlib_mains; i++)
+    {
+      /* Trivially OK, and proves that index >= vec_len(...) */
+      if (index < vec_len (cm->counters[i]))
+        continue;
+      if (_vec_resize_will_expand
+          (cm->counters[i],
+           index - vec_len (cm->counters[i]) /* length_increment */ ,
+           sizeof (cm->counters[i]) /* data_bytes */ ,
+           0 /* header_bytes */ ,
+           CLIB_CACHE_LINE_BYTES /* data_alignment */ ))
+        {
+          vlib_stats_pop_heap (cm, oldheap, index,
+                               3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+          return 1;
+        }
+    }
+  vlib_stats_pop_heap (cm, oldheap, index,
+                       3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+  return 0;
+}
+
 void
 vlib_free_combined_counter (vlib_combined_counter_main_t * cm)
 {
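
The new helper mirrors vlib_validate_combined_counter(), but only
answers whether the coming validation would reallocate: it returns 1
if any per-thread counter vector would move (or if the counters have
never been allocated), else 0. A caller can therefore probe first and
barrier-sync only when needed; a sketch of the intended call sequence,
where cm, vm, and index stand for whatever the caller already has:

    /* Probe first; stop the workers only if the validate would
       move the per-thread counter vectors. */
    int need_sync = vlib_validate_combined_counter_will_expand (cm, index);
    if (need_sync)
      vlib_worker_thread_barrier_sync (vm);
    vlib_validate_combined_counter (cm, index);
    if (need_sync)
      vlib_worker_thread_barrier_release (vm);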

src/vlib/counter.h

@@ -314,6 +314,9 @@ void vlib_free_simple_counter (vlib_simple_counter_main_t * cm);
 void vlib_validate_combined_counter (vlib_combined_counter_main_t * cm,
                                      u32 index);
 
+int vlib_validate_combined_counter_will_expand
+  (vlib_combined_counter_main_t * cm, u32 index);
+
 void vlib_free_combined_counter (vlib_combined_counter_main_t * cm);
 
 /** Obtain the number of simple or combined counters allocated.

src/vnet/dpo/load_balance.c

@@ -93,12 +93,33 @@ static load_balance_t *
 load_balance_alloc_i (void)
 {
     load_balance_t *lb;
+    u8 need_barrier_sync = 0;
+    vlib_main_t *vm = vlib_get_main();
+    ASSERT (vm->thread_index == 0);
+
+    pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+                                  CLIB_CACHE_LINE_BYTES);
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_sync (vm);
 
     pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
     clib_memset(lb, 0, sizeof(*lb));
 
     lb->lb_map = INDEX_INVALID;
     lb->lb_urpf = INDEX_INVALID;
+
+    if (need_barrier_sync == 0)
+    {
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_to_counters),
+             load_balance_get_index(lb));
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_via_counters),
+             load_balance_get_index(lb));
+        if (need_barrier_sync)
+            vlib_worker_thread_barrier_sync (vm);
+    }
+
     vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                    load_balance_get_index(lb));
     vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -108,6 +129,9 @@ load_balance_alloc_i (void)
     vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                                load_balance_get_index(lb));
 
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_release (vm);
+
     return (lb);
 }
 
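Note the ordering in load_balance_alloc_i(): if expanding
load_balance_pool already forced a barrier sync, the counter probes
are skipped, because the workers are stopped and the subsequent
vlib_validate_combined_counter() calls may reallocate freely. Only
when the pool did not expand do the two will-expand probes decide
whether the barrier must be taken for the counter vectors; a single
release at the end covers either path.
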
@@ -1121,7 +1145,7 @@ load_balance_inline (vlib_main_t * vm,
             vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
         }
 
-	dpo0 = load_balance_get_bucket_i(lb0,
+        dpo0 = load_balance_get_bucket_i(lb0,
                                          vnet_buffer(b0)->ip.flow_hash &
                                          (lb0->lb_n_buckets_minus_1));
 
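The final hunk changes only whitespace: the dpo0 assignment is
re-indented, which accounts for the commit's single deleted line.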