vppinfra: refactor *_will_expand() functions
Type: refactor
Change-Id: I3625eacf9e04542ca8778df5d46075a8654642c7
Signed-off-by: Damjan Marion <damarion@cisco.com>

parent 05563c9a90
commit 66d4cb5a21
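Before the per-file hunks, a minimal sketch (not part of the commit) of the caller pattern this refactor enables: the *_will_expand() helpers now return a value instead of writing through a macro output argument. thing_t, thing_pool and thing_alloc are hypothetical names; pool_get_will_expand, pool_get_aligned and the barrier calls are the vlib/vppinfra APIs touched by this change.

/* Hedged sketch: main-thread allocation guarded by a worker barrier. */
#include <vlib/vlib.h>
#include <vppinfra/pool.h>

typedef struct { u32 value; } thing_t;	/* hypothetical element type */
static thing_t *thing_pool;		/* hypothetical pool */

static thing_t *
thing_alloc (void)
{
  vlib_main_t *vm = vlib_get_main ();
  /* new style: the predicate returns the answer instead of filling an out-arg */
  u8 need_barrier_sync = pool_get_will_expand (thing_pool);
  thing_t *t;

  ASSERT (vm->thread_index == 0);

  /* if the pool is about to reallocate, stop the workers first */
  if (need_barrier_sync)
    vlib_worker_thread_barrier_sync (vm);

  pool_get_aligned (thing_pool, t, CLIB_CACHE_LINE_BYTES);

  if (need_barrier_sync)
    vlib_worker_thread_barrier_release (vm);

  return t;
}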
@@ -69,14 +69,12 @@ ip_adjacency_t *
 adj_alloc (fib_protocol_t proto)
 {
   ip_adjacency_t *adj;
-  u8 need_barrier_sync = 0;
+  u8 need_barrier_sync = pool_get_will_expand (adj_pool);
   vlib_main_t *vm;
   vm = vlib_get_main();
 
   ASSERT (vm->thread_index == 0);
 
-  pool_get_aligned_will_expand (adj_pool, need_barrier_sync,
-                                CLIB_CACHE_LINE_BYTES);
   /* If the adj_pool will expand, stop the parade. */
   if (need_barrier_sync)
     vlib_worker_thread_barrier_sync (vm);
@@ -431,8 +431,7 @@ vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg, u8 * data,
   if (!vnet_crypto_key_len_check (alg, length))
     return ~0;
 
-  pool_get_aligned_will_expand (cm->keys, need_barrier_sync,
-                                CLIB_CACHE_LINE_BYTES);
+  need_barrier_sync = pool_get_will_expand (cm->keys);
   /* If the cm->keys will expand, stop the parade. */
   if (need_barrier_sync)
     vlib_worker_thread_barrier_sync (vm);
@@ -543,7 +543,7 @@ dpo_get_next_node_by_type_and_proto (dpo_type_t child_type,
 
 #define dpo_pool_barrier_sync(VM,P,YESNO) \
 do { \
-    pool_get_aligned_will_expand ((P), YESNO, CLIB_CACHE_LINE_BYTES); \
+    YESNO = pool_get_will_expand (P); \
     \
     if (YESNO) \
     { \
@@ -100,8 +100,8 @@ load_balance_alloc_i (void)
   vlib_main_t *vm = vlib_get_main();
   ASSERT (vm->thread_index == 0);
 
-  pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
-                                CLIB_CACHE_LINE_BYTES);
+  need_barrier_sync = pool_get_will_expand (load_balance_pool);
+
   if (need_barrier_sync)
     vlib_worker_thread_barrier_sync (vm);
 
@@ -537,11 +537,10 @@ fib_entry_alloc (u32 fib_index,
 {
     fib_entry_t *fib_entry;
     fib_prefix_t *fep;
-    u8 need_barrier_sync = 0;
+    u8 need_barrier_sync = pool_get_will_expand (fib_entry_pool);
     vlib_main_t *vm = vlib_get_main();
     ASSERT (vm->thread_index == 0);
 
-    pool_get_will_expand (fib_entry_pool, need_barrier_sync );
     if (need_barrier_sync)
         vlib_worker_thread_barrier_sync (vm);
 
@@ -55,11 +55,10 @@ index_t
 fib_urpf_list_alloc_and_lock (void)
 {
     fib_urpf_list_t *urpf;
-    u8 need_barrier_sync = 0;
+    u8 need_barrier_sync = pool_get_will_expand (fib_urpf_list_pool);
     vlib_main_t *vm = vlib_get_main();
     ASSERT (vm->thread_index == 0);
 
-    pool_get_will_expand (fib_urpf_list_pool, need_barrier_sync );
     if (need_barrier_sync)
         vlib_worker_thread_barrier_sync (vm);
 
@@ -298,11 +298,9 @@ static ip_pmtu_dpo_t *
 ip_pmtu_dpo_alloc (void)
 {
   vlib_main_t *vm = vlib_get_main ();
-  u8 need_barrier_sync = 0;
+  u8 need_barrier_sync = pool_get_will_expand (ip_pmtu_dpo_pool);
   ip_pmtu_dpo_t *ipm;
 
-  pool_get_aligned_will_expand (ip_pmtu_dpo_pool, need_barrier_sync,
-                                sizeof (ip_pmtu_dpo_t));
 
   if (need_barrier_sync)
     vlib_worker_thread_barrier_sync (vm);
@@ -202,9 +202,8 @@ session_alloc (u32 thread_index)
 {
   session_worker_t *wrk = &session_main.wrk[thread_index];
   session_t *s;
-  u8 will_expand = 0;
-  pool_get_aligned_will_expand (wrk->sessions, will_expand,
-                                CLIB_CACHE_LINE_BYTES);
+  u8 will_expand = pool_get_will_expand (wrk->sessions);
+
   /* If we have peekers, let them finish */
   if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
     {
@@ -115,11 +115,10 @@ u32
 tls_ctx_half_open_alloc (void)
 {
   tls_main_t *tm = &tls_main;
-  u8 will_expand = 0;
+  u8 will_expand = pool_get_will_expand (tm->half_open_ctx_pool);
   tls_ctx_t *ctx;
   u32 ctx_index;
 
-  pool_get_aligned_will_expand (tm->half_open_ctx_pool, will_expand, 0);
   if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
     {
       clib_rwlock_writer_lock (&tm->half_open_rwlock);
@@ -94,9 +94,7 @@ udp_connection_alloc (u32 thread_index)
 {
   udp_main_t *um = &udp_main;
   udp_connection_t *uc;
-  u32 will_expand = 0;
-  pool_get_aligned_will_expand (um->connections[thread_index], will_expand,
-                                CLIB_CACHE_LINE_BYTES);
+  u32 will_expand = pool_get_will_expand (um->connections[thread_index]);
 
   if (PREDICT_FALSE (will_expand))
     {
@@ -208,9 +208,7 @@ clib_bitmap_set (uword * ai, uword i, uword value)
 always_inline u8
 clib_bitmap_will_expand (uword *ai, uword i)
 {
-  uword i0 = i / BITS (ai[0]);
-  return _vec_resize_will_expand (ai, 1, i0 * sizeof (ai[0]), 0,
-                                  sizeof (uword));
+  return (i / BITS (ai[0])) < vec_max_len (ai);
 }
 
 /** Gets the ith bit value from a bitmap
@@ -249,57 +249,46 @@ do { \
 /** Allocate an object E from a pool P and zero it */
 #define pool_get_zero(P,E) pool_get_aligned_zero(P,E,0)
 
-/** See if pool_get will expand the pool or not */
-#define pool_get_aligned_will_expand(P,YESNO,A) \
-do { \
-  pool_header_t * _pool_var (p) = pool_header (P); \
-  uword _pool_var (l); \
-  \
-  _pool_var (l) = 0; \
-  if (P) \
-    { \
-      if (_pool_var (p)->max_elts) \
-        _pool_var (l) = _pool_var (p)->max_elts; \
-      else \
-        _pool_var (l) = vec_len (_pool_var (p)->free_indices); \
-    } \
-  \
-  /* Free elements, certainly won't expand */ \
-  if (_pool_var (l) > 0) \
-    YESNO=0; \
-  else \
-    { \
-      /* Nothing on free list, make a new element and return it. */ \
-      YESNO = _vec_resize_will_expand \
-        (P, \
-         /* length_increment */ 1, \
-         /* new size */ (vec_len (P) + 1) * sizeof (P[0]), \
-         pool_aligned_header_bytes, \
-         /* align */ (A)); \
-    } \
-} while (0)
-
-/** See if pool_put will expand free_bitmap or free_indices or not */
-#define pool_put_will_expand(P, E, YESNO) \
-  do \
-    { \
-      pool_header_t *_pool_var (p) = pool_header (P); \
-      \
-      uword _pool_var (i) = (E) - (P); \
-      /* free_bitmap or free_indices may expand. */ \
-      YESNO = \
-        clib_bitmap_will_expand (_pool_var (p)->free_bitmap, _pool_var (i)); \
-      \
-      YESNO += _vec_resize_will_expand ( \
-        _pool_var (p)->free_indices, 1, \
-        (vec_len (_pool_var (p)->free_indices) + 1) * \
-          sizeof (_pool_var (p)->free_indices[0]), \
-        0, 0); \
-    } \
-  while (0)
-
-/** Tell the caller if pool get will expand the pool */
-#define pool_get_will_expand(P,YESNO) pool_get_aligned_will_expand(P,YESNO,0)
+always_inline int
+_pool_get_will_expand (void *p, uword elt_size)
+{
+  pool_header_t *ph;
+  uword len;
+
+  if (p == 0)
+    return 1;
+
+  ph = pool_header (p);
+
+  if (ph->max_elts)
+    len = ph->max_elts;
+  else
+    len = vec_len (ph->free_indices);
+
+  /* Free elements, certainly won't expand */
+  if (len > 0)
+    return 0;
+
+  return _vec_resize_will_expand (p, 1, elt_size);
+}
+
+#define pool_get_will_expand(P) _pool_get_will_expand (P, sizeof ((P)[0]))
+
+always_inline int
+_pool_put_will_expand (void *p, uword index, uword elt_size)
+{
+  pool_header_t *ph = pool_header (p);
+
+  if (clib_bitmap_will_expand (ph->free_bitmap, index))
+    return 1;
+
+  if (vec_resize_will_expand (ph->free_indices, 1))
+    return 1;
+
+  return 0;
+}
+
+#define pool_put_will_expand(P, E) _pool_put_will_expand (P, (E) - (P), sizeof ((P)[0]))
 
 /** Use free bitmap to query whether given element is free. */
 #define pool_is_free(P,E) \
@@ -151,12 +151,6 @@ vec_resize_allocate_memory (void *v,
   return v + header_bytes;
 }
 
-__clib_export uword
-clib_mem_is_vec_h (void *v, uword header_bytes)
-{
-  return clib_mem_is_heap_object (vec_header (v));
-}
-
 __clib_export u32
 vec_len_not_inline (void *v)
 {
@@ -184,26 +184,14 @@ _vec_resize_inline (void *v,
 */
 
 always_inline int
-_vec_resize_will_expand (void *v,
-                         word length_increment,
-                         uword data_bytes, uword header_bytes,
-                         uword data_align)
+_vec_resize_will_expand (void *v, uword n_elts, uword elt_size)
 {
-  uword new_data_bytes, aligned_header_bytes;
-
-  aligned_header_bytes = vec_header_bytes (header_bytes);
-
-  new_data_bytes = data_bytes + aligned_header_bytes;
-
   if (PREDICT_TRUE (v != 0))
     {
-      void *p = v - aligned_header_bytes;
-
-      /* Vector header must start heap object. */
-      ASSERT (clib_mem_is_heap_object (p));
+      ASSERT (clib_mem_is_heap_object (vec_header (v)));
 
       /* Typically we'll not need to resize. */
-      if (new_data_bytes <= clib_mem_size (p))
+      if (vec_mem_size (v) >= ((_vec_len (v) + n_elts)) * elt_size)
        return 0;
     }
   return 1;
@@ -217,22 +205,7 @@ _vec_resize_will_expand (void *v,
 */
 
 #define vec_resize_will_expand(V, N) \
-  ({ \
-    word _v (n) = (N); \
-    word _v (l) = vec_len (V); \
-    _vec_resize_will_expand ((V), _v (n), \
-                             (_v (l) + _v (n)) * sizeof ((V)[0]), 0, 0); \
-  })
-
-/** \brief Predicate function, says whether the supplied vector is a clib heap
-    object (general version).
-
-    @param v pointer to a vector
-    @param header_bytes vector header size in bytes (may be zero)
-    @return 0 or 1
-*/
-uword clib_mem_is_vec_h (void *v, uword header_bytes);
+  _vec_resize_will_expand (V, N, sizeof ((V)[0]))
 
 /** \brief Predicate function, says whether the supplied vector is a clib heap
     object
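For completeness, a small hedged sketch (not from the commit) of the simplified vector predicate: vec_resize_will_expand (V, N) keeps its name and caller-visible meaning, nonzero when appending N elements would reallocate V, only its expansion changed. check_growth is a hypothetical helper.

#include <vppinfra/vec.h>

static void
check_growth (void)		/* hypothetical helper, illustration only */
{
  u8 *v = 0;

  vec_validate (v, 63);		/* vector now holds 64 elements */

  if (vec_resize_will_expand (v, 16))
    {
      /* appending 16 more elements would force a reallocation */
    }

  vec_free (v);
}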
@@ -243,7 +216,7 @@ uword clib_mem_is_vec_h (void *v, uword header_bytes);
 always_inline uword
 clib_mem_is_vec (void *v)
 {
-  return clib_mem_is_vec_h (v, 0);
+  return clib_mem_is_heap_object (vec_header (v));
 }
 
 /* Local variable naming macro (prevents collisions with other macro naming). */