vppinfra: add atomic macros for __sync builtins
This is the first part of the addition of atomic macros, starting with macros for the __sync builtins only.
- Based on an earlier patch by Damjan (https://gerrit.fd.io/r/#/c/10729/)
Additionally:
- clib_atomic_release macro added and used in the absence of any memory barrier.
- clib_atomic_bool_cmp_and_swap added.

Change-Id: Ie4e48c1e184a652018d1d0d87c4be80ddd180a3b
Original-patch-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Sirshak Das <sirshak.das@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
This commit is contained in:
Sirshak Das (committed by Damjan Marion)
parent bf3443b0f8
commit 2f6d7bb93c
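For reference, every replacement in the diff below maps a legacy __sync call site onto one of the new clib_atomic_* wrappers. A minimal sketch of how such wrappers could be defined on top of the GCC __sync builtins is shown here; the header name and exact definitions are assumptions inferred from the renames on this page, not the authoritative vppinfra implementation.

/* sketch only: assumed one-to-one wrappers over the GCC __sync builtins,
 * as implied by the old/new pairs in this diff */
#define clib_atomic_fetch_add(a, b)           __sync_fetch_and_add (a, b)
#define clib_atomic_fetch_sub(a, b)           __sync_fetch_and_sub (a, b)
#define clib_atomic_add_fetch(a, b)           __sync_add_and_fetch (a, b)
#define clib_atomic_fetch_or(a, b)            __sync_fetch_and_or (a, b)
#define clib_atomic_fetch_and(a, b)           __sync_fetch_and_and (a, b)
#define clib_atomic_cmp_and_swap(a, o, n)     __sync_val_compare_and_swap (a, o, n)
#define clib_atomic_bool_cmp_and_swap(a, o, n) __sync_bool_compare_and_swap (a, o, n)
#define clib_atomic_test_and_set(a)           __sync_lock_test_and_set (a, 1)
#define clib_atomic_release(a)                __sync_lock_release (a)

With these definitions the spin-lock pattern used throughout the diff reads: while (clib_atomic_test_and_set (lock)) ; to acquire, and clib_atomic_release (lock) to release.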
@@ -43,11 +43,11 @@ typedef struct {
 extern jvpp_main_t jvpp_main __attribute__((aligned (64)));

 static_always_inline u32 vppjni_get_context_id(jvpp_main_t * jm) {
- return __sync_add_and_fetch(&jm->context_id, 1);
+ return clib_atomic_add_fetch(&jm->context_id, 1);
 }

 static_always_inline void vppjni_lock(jvpp_main_t * jm, u32 tag) {
- while (__sync_lock_test_and_set(&jm->lock, 1))
+ while (clib_atomic_test_and_set(&jm->lock))
 ;
 jm->tag = tag;
 }
@@ -410,7 +410,7 @@ acl_fa_deactivate_session (acl_main_t * am, u32 sw_if_index,
 }

 sess->deleted = 1;
- clib_smp_atomic_add (&am->fa_session_total_deactivations, 1);
+ clib_atomic_fetch_add (&am->fa_session_total_deactivations, 1);
 clib_mem_set_heap (oldheap);
 }

@@ -432,7 +432,7 @@ acl_fa_put_session (acl_main_t * am, u32 sw_if_index,
 vec_validate (pw->fa_session_dels_by_sw_if_index, sw_if_index);
 clib_mem_set_heap (oldheap);
 pw->fa_session_dels_by_sw_if_index[sw_if_index]++;
- clib_smp_atomic_add (&am->fa_session_total_dels, 1);
+ clib_atomic_fetch_add (&am->fa_session_total_dels, 1);
 }

 always_inline int
@@ -571,7 +571,7 @@ acl_fa_add_session (acl_main_t * am, int is_input, int is_ip6,
 vec_validate (pw->fa_session_adds_by_sw_if_index, sw_if_index);
 clib_mem_set_heap (oldheap);
 pw->fa_session_adds_by_sw_if_index[sw_if_index]++;
- clib_smp_atomic_add (&am->fa_session_total_adds, 1);
+ clib_atomic_fetch_add (&am->fa_session_total_adds, 1);
 return sess;
 }
@@ -162,7 +162,7 @@ static_always_inline
 if (PREDICT_FALSE (xd->lockp != 0))
 {
 queue_id = queue_id % xd->tx_q_used;
- while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
+ while (clib_atomic_test_and_set (xd->lockp[queue_id]))
 /* zzzz */
 queue_id = (queue_id + 1) % xd->tx_q_used;
 }
@@ -191,7 +191,7 @@ static_always_inline
 }

 if (PREDICT_FALSE (xd->lockp != 0))
- *xd->lockp[queue_id] = 0;
+ clib_atomic_release (xd->lockp[queue_id]);

 if (PREDICT_FALSE (n_sent < 0))
 {
@@ -191,7 +191,7 @@ ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
 ioam_path_map_t *path;
 u8 k, i;

- while (__sync_lock_test_and_set (data->writer_lock, 1))
+ while (clib_atomic_test_and_set (data->writer_lock))
 ;

 trace_data = &data->trace_data;
@@ -208,7 +208,7 @@ ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
 for (k = 0; k < trace_record->num_nodes; k++)
 path[k].state_up = 0;
 }
- *(data->writer_lock) = 0;
+ clib_atomic_release (data->writer_lock);
 }

 always_inline void
@@ -225,7 +225,7 @@ ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
 u16 size_of_traceopt_per_node;
 u16 size_of_all_traceopts;

- while (__sync_lock_test_and_set (data->writer_lock, 1))
+ while (clib_atomic_test_and_set (data->writer_lock))
 ;

 trace_data = &data->trace_data;
@@ -277,7 +277,7 @@ ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
 }
 }
 end:
- *(data->writer_lock) = 0;
+ clib_atomic_release (data->writer_lock);
 }

 always_inline int
@@ -295,7 +295,7 @@ ip6_ioam_analyse_hbh_trace (ioam_analyser_data_t * data,
 ioam_path_map_t *path = NULL;
 ioam_analyse_trace_record *trace_record;

- while (__sync_lock_test_and_set (data->writer_lock, 1))
+ while (clib_atomic_test_and_set (data->writer_lock))
 ;

 trace_data = &data->trace_data;
@@ -409,7 +409,7 @@ found_match:
 (u32) ((sum + delay) / (data->seqno_data.rx_packets + 1));
 }
 DONE:
- *(data->writer_lock) = 0;
+ clib_atomic_release (data->writer_lock);
 return 0;
 }

@@ -417,13 +417,14 @@ always_inline int
 ip6_ioam_analyse_hbh_e2e (ioam_analyser_data_t * data,
 ioam_e2e_packet_t * e2e, u16 len)
 {
- while (__sync_lock_test_and_set (data->writer_lock, 1))
+ while (clib_atomic_test_and_set (data->writer_lock))
 ;

 ioam_analyze_seqno (&data->seqno_data,
 (u64) clib_net_to_host_u32 (e2e->e2e_data));

- *(data->writer_lock) = 0;
+ clib_atomic_release (data->writer_lock);

 return 0;
 }

@@ -510,7 +511,8 @@ ioam_analyse_init_data (ioam_analyser_data_t * data)

 data->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
 CLIB_CACHE_LINE_BYTES);
- *(data->writer_lock) = 0;
+
+ clib_atomic_release (data->writer_lock);

 trace_data = &(data->trace_data);
 for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++)
@@ -150,7 +150,7 @@ ioam_analyse_add_ipfix_record (flow_report_t * fr,
 ip6_address_t * src, ip6_address_t * dst,
 u16 src_port, u16 dst_port)
 {
- while (__sync_lock_test_and_set (record->writer_lock, 1))
+ while (clib_atomic_test_and_set (record->writer_lock))
 ;

 int field_index = 0;
@@ -259,7 +259,7 @@ ioam_analyse_add_ipfix_record (flow_report_t * fr,
 *(record->chached_data_list) = *record;
 record->chached_data_list->chached_data_list = NULL;

- *(record->writer_lock) = 0;
+ clib_atomic_release (record->writer_lock);
 return offset;
 }
@@ -256,17 +256,17 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
 data1 = ioam_analyse_get_data_from_flow_id (flow_id1);

- while (__sync_lock_test_and_set (data0->writer_lock, 1))
+ while (clib_atomic_test_and_set (data0->writer_lock))
 ;
 data0->pkt_counter++;
 data0->bytes_counter += p_len0;
- *(data0->writer_lock) = 0;
+ clib_atomic_release (data0->writer_lock);

- while (__sync_lock_test_and_set (data1->writer_lock, 1))
+ while (clib_atomic_test_and_set (data1->writer_lock))
 ;
 data1->pkt_counter++;
 data1->bytes_counter += p_len1;
- *(data1->writer_lock) = 0;
+ clib_atomic_release (data1->writer_lock);
 }
 else if (error0 == 0)
 {
@@ -274,11 +274,11 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 pkts_failed++;

 data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
- while (__sync_lock_test_and_set (data0->writer_lock, 1))
+ while (clib_atomic_test_and_set (data0->writer_lock))
 ;
 data0->pkt_counter++;
 data0->bytes_counter += p_len0;
- *(data0->writer_lock) = 0;
+ clib_atomic_release (data0->writer_lock);
 }
 else if (error1 == 0)
 {
@@ -286,11 +286,11 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 pkts_failed++;

 data1 = ioam_analyse_get_data_from_flow_id (flow_id1);
- while (__sync_lock_test_and_set (data1->writer_lock, 1))
+ while (clib_atomic_test_and_set (data1->writer_lock))
 ;
 data1->pkt_counter++;
 data1->bytes_counter += p_len1;
- *(data1->writer_lock) = 0;
+ clib_atomic_release (data1->writer_lock);
 }
 else
 pkts_failed += 2;
@@ -327,12 +327,12 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 {
 pkts_analysed++;
 data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
- while (__sync_lock_test_and_set (data0->writer_lock, 1))
+ while (clib_atomic_test_and_set (data0->writer_lock))
 ;
 data0->pkt_counter++;
 data0->bytes_counter +=
 clib_net_to_host_u16 (ip60->payload_length);
- *(data0->writer_lock) = 0;
+ clib_atomic_release (data0->writer_lock);
 }
 else
 pkts_failed++;
@@ -393,13 +393,13 @@ ip6_ioam_analyse_hbh_pot (u32 flow_id, ip6_hop_by_hop_option_t * opt0,
 pot_profile = pot_profile_get_active ();
 ret = pot_validate (pot_profile, cumulative, random);

- while (__sync_lock_test_and_set (data->writer_lock, 1))
+ while (clib_atomic_test_and_set (data->writer_lock))
 ;

 (0 == ret) ? (data->pot_data.sfc_validated_count++) :
 (data->pot_data.sfc_invalidated_count++);

- *(data->writer_lock) = 0;
+ clib_atomic_release (data->writer_lock);
 return 0;
 }
@@ -436,11 +436,11 @@ ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
 */
 for (i = 0; i < vec_len (thread_index); i++)
 {
- while (__sync_lock_test_and_set (em->lockp[thread_index[i]], 1))
+ while (clib_atomic_test_and_set (em->lockp[thread_index[i]]))
 ;
 em->buffer_per_thread[thread_index[i]] =
 vec_pop (vec_buffer_indices);
- *em->lockp[thread_index[i]] = 0;
+ clib_atomic_release (em->lockp[thread_index[i]]);
 }

 /* Send the buffers */
@@ -479,7 +479,7 @@ do { \
 from = vlib_frame_vector_args (F); \
 n_left_from = (F)->n_vectors; \
 next_index = (N)->cached_next_index; \
- while (__sync_lock_test_and_set ((EM)->lockp[(VM)->thread_index], 1)); \
+ while (clib_atomic_test_and_set ((EM)->lockp[(VM)->thread_index])); \
 my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index); \
 my_buf->touched_at = vlib_time_now (VM); \
 while (n_left_from > 0) \
@@ -27,8 +27,8 @@

 lb_main_t lb_main;

- #define lb_get_writer_lock() do {} while(__sync_lock_test_and_set (lb_main.writer_lock, 1))
- #define lb_put_writer_lock() lb_main.writer_lock[0] = 0
+ #define lb_get_writer_lock() do {} while(clib_atomic_test_and_set (lb_main.writer_lock))
+ #define lb_put_writer_lock() clib_atomic_release (lb_main.writer_lock)

 static void lb_as_stack (lb_as_t *as);
@@ -474,7 +474,7 @@ map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
 void
 map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);

- #define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {}
+ #define map_ip4_reass_lock() while (clib_atomic_test_and_set (map_main.ip4_reass_lock)) {}
 #define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0)

 static_always_inline void
@@ -499,7 +499,7 @@ map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
 void
 map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);

- #define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {}
+ #define map_ip6_reass_lock() while (clib_atomic_test_and_set (map_main.ip6_reass_lock)) {}
 #define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0)

 int
@@ -555,14 +555,14 @@ static inline void
 map_domain_counter_lock (map_main_t *mm)
 {
 if (mm->counter_lock)
- while (__sync_lock_test_and_set(mm->counter_lock, 1))
+ while (clib_atomic_test_and_set (mm->counter_lock))
 /* zzzz */ ;
 }
 static inline void
 map_domain_counter_unlock (map_main_t *mm)
 {
 if (mm->counter_lock)
- *mm->counter_lock = 0;
+ clib_atomic_release (mm->counter_lock);
 }
@@ -159,13 +159,13 @@ snat_det_ses_create (snat_det_map_t * dm, ip4_address_t * in_addr,
 {
 if (!dm->sessions[i + user_offset].in_port)
 {
- if (__sync_bool_compare_and_swap
+ if (clib_atomic_bool_cmp_and_swap
 (&dm->sessions[i + user_offset].in_port, 0, in_port))
 {
 dm->sessions[i + user_offset].out.as_u64 = out->as_u64;
 dm->sessions[i + user_offset].state = SNAT_SESSION_UNKNOWN;
 dm->sessions[i + user_offset].expire = 0;
- __sync_add_and_fetch (&dm->ses_num, 1);
+ clib_atomic_add_fetch (&dm->ses_num, 1);
 return &dm->sessions[i + user_offset];
 }
 }
@@ -179,10 +179,10 @@ snat_det_ses_create (snat_det_map_t * dm, ip4_address_t * in_addr,
 always_inline void
 snat_det_ses_close (snat_det_map_t * dm, snat_det_session_t * ses)
 {
- if (__sync_bool_compare_and_swap (&ses->in_port, ses->in_port, 0))
+ if (clib_atomic_bool_cmp_and_swap (&ses->in_port, ses->in_port, 0))
 {
 ses->out.as_u64 = 0;
- __sync_add_and_fetch (&dm->ses_num, -1);
+ clib_atomic_add_fetch (&dm->ses_num, -1);
 }
 }
@@ -108,7 +108,7 @@ svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
 msg.ring_index = ring - mq->rings;
 msg.elt_index = ring->tail;
 ring->tail = (ring->tail + 1) % ring->nitems;
- __sync_fetch_and_add (&ring->cursize, 1);
+ clib_atomic_fetch_add (&ring->cursize, 1);
 return msg;
 }

@@ -155,7 +155,7 @@ svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
 msg.ring_index = ring - mq->rings;
 msg.elt_index = ring->tail;
 ring->tail = (ring->tail + 1) % ring->nitems;
- __sync_fetch_and_add (&ring->cursize, 1);
+ clib_atomic_fetch_add (&ring->cursize, 1);
 break;
 }
 return msg;
@@ -185,7 +185,7 @@ svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
 /* for now, expect messages to be processed in order */
 ASSERT (0);
 }
- __sync_fetch_and_sub (&ring->cursize, 1);
+ clib_atomic_fetch_sub (&ring->cursize, 1);
 }

 static int
@@ -103,7 +103,7 @@ ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag)
 return;
 }

- while (__sync_lock_test_and_set (&h->lock, 1))
+ while (clib_atomic_test_and_set (&h->lock))
 ;

 h->owner_pid = my_pid;
@@ -114,7 +114,7 @@ ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag)
 always_inline void
 ssvm_lock_non_recursive (ssvm_shared_header_t * h, u32 tag)
 {
- while (__sync_lock_test_and_set (&h->lock, 1))
+ while (clib_atomic_test_and_set (&h->lock))
 ;

 h->tag = tag;
@@ -513,7 +513,7 @@ CLIB_MARCH_FN (svm_fifo_enqueue_nowait, int, svm_fifo_t * f, u32 max_bytes,

 /* Atomically increase the queue length */
 ASSERT (cursize + total_copy_bytes <= nitems);
- __sync_fetch_and_add (&f->cursize, total_copy_bytes);
+ clib_atomic_fetch_add (&f->cursize, total_copy_bytes);

 return (total_copy_bytes);
 }
@@ -659,7 +659,7 @@ CLIB_MARCH_FN (svm_fifo_dequeue_nowait, int, svm_fifo_t * f, u32 max_bytes,

 ASSERT (f->head <= nitems);
 ASSERT (cursize >= total_copy_bytes);
- __sync_fetch_and_sub (&f->cursize, total_copy_bytes);
+ clib_atomic_fetch_sub (&f->cursize, total_copy_bytes);

 return (total_copy_bytes);
 }
@@ -757,7 +757,7 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes)

 ASSERT (f->head <= nitems);
 ASSERT (cursize >= total_drop_bytes);
- __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
+ clib_atomic_fetch_sub (&f->cursize, total_drop_bytes);

 return total_drop_bytes;
 }
@@ -766,7 +766,7 @@ void
 svm_fifo_dequeue_drop_all (svm_fifo_t * f)
 {
 f->head = f->tail;
- __sync_fetch_and_sub (&f->cursize, f->cursize);
+ clib_atomic_fetch_sub (&f->cursize, f->cursize);
 }

 int
@@ -813,7 +813,7 @@ svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs)
 f->head = (f->head + fs[0].len) % f->nitems;
 total_drop_bytes = fs[0].len;
 }
- __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
+ clib_atomic_fetch_sub (&f->cursize, total_drop_bytes);
 }

 u32
@@ -169,7 +169,7 @@ svm_fifo_set_event (svm_fifo_t * f)
 always_inline void
 svm_fifo_unset_event (svm_fifo_t * f)
 {
- __sync_lock_release (&f->has_event);
+ clib_atomic_release (&f->has_event);
 }

 static inline void
@@ -923,7 +923,7 @@ vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
 tail->total_length_not_including_first_buffer;

 next_segment:
- __sync_add_and_fetch (&tail->n_add_refs, 1);
+ clib_atomic_add_fetch (&tail->n_add_refs, 1);

 if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
 {
@@ -1153,7 +1153,7 @@ vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)

 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

- while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
+ while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
 ;

 p = hash_get (vlib_buffer_state_validation_hash, b);
@@ -1196,7 +1196,7 @@ vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)

 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

- while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
+ while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
 ;

 hash_set (vlib_buffer_state_validation_hash, b, expected);
@@ -516,7 +516,7 @@ vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,

 ASSERT (fq);

- new_tail = __sync_add_and_fetch (&fq->tail, 1);
+ new_tail = clib_atomic_add_fetch (&fq->tail, 1);

 /* Wait until a ring slot is available */
 while (new_tail >= fq->head + fq->nelts)
@@ -576,12 +576,12 @@ vlib_worker_thread_init (vlib_worker_thread_t * w)
 {

 /* Initial barrier sync, for both worker and i/o threads */
- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);

 while (*vlib_worker_threads->wait_at_barrier)
 ;

- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
 }
 }

@@ -1310,22 +1310,6 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input)

 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");

- #if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
- void
- __sync_fetch_and_add_8 (void)
- {
- fformat (stderr, "%s called\n", __FUNCTION__);
- abort ();
- }
-
- void
- __sync_add_and_fetch_8 (void)
- {
- fformat (stderr, "%s called\n", __FUNCTION__);
- abort ();
- }
- #endif
-
 void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
 void
 vnet_main_fixup (vlib_fork_fixup_t which)
@@ -1493,8 +1477,8 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)

 /* Do per thread rebuilds in parallel */
 refork_needed = 1;
- clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
- (vec_len (vlib_mains) - 1));
+ clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+ (vec_len (vlib_mains) - 1));
 now = vlib_time_now (vm);
 t_update_main = now - vm->barrier_epoch;
 }
@@ -414,7 +414,7 @@ vlib_worker_thread_barrier_check (void)
 ed->thread_index = thread_index;
 }

- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
 if (CLIB_DEBUG > 0)
 {
 vm = vlib_get_main ();
@@ -424,7 +424,7 @@ vlib_worker_thread_barrier_check (void)
 ;
 if (CLIB_DEBUG > 0)
 vm->parked_at_barrier = 0;
- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);

 if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
 {
@@ -450,8 +450,8 @@ vlib_worker_thread_barrier_check (void)
 }

 vlib_worker_thread_node_refork ();
- clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
- -1);
+ clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+ -1);
 while (*vlib_worker_threads->node_reforks_required)
 ;
 }
@@ -519,7 +519,7 @@ vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
 fq = fqm->vlib_frame_queues[index];
 ASSERT (fq);

- new_tail = __sync_add_and_fetch (&fq->tail, 1);
+ new_tail = clib_atomic_add_fetch (&fq->tail, 1);

 /* Wait until a ring slot is available */
 while (new_tail >= fq->head_hint + fq->nelts)
@@ -44,7 +44,7 @@ cj_log (u32 type, void *data0, void *data1)
 if (cjm->enable == 0)
 return;

- new_tail = __sync_add_and_fetch (&cjm->tail, 1);
+ new_tail = clib_atomic_add_fetch (&cjm->tail, 1);

 r = (cj_record_t *) & (cjm->records[new_tail & (cjm->num_records - 1)]);
 r->time = vlib_time_now (cjm->vlib_main);
@@ -444,7 +444,7 @@ vnet_classify_add_del (vnet_classify_table_t * t,

 hash >>= t->log2_nbuckets;

- while (__sync_lock_test_and_set (t->writer_lock, 1))
+ while (clib_atomic_test_and_set (t->writer_lock))
 ;

 /* First elt in the bucket? */
@@ -122,7 +122,7 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
 static_always_inline int
 vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
 {
- return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
+ return clib_atomic_test_and_set (vui->vring_locks[qid]);
 }

 /**
@@ -141,7 +141,7 @@ vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
 static_always_inline void
 vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
 {
- *vui->vring_locks[qid] = 0;
+ clib_atomic_release (vui->vring_locks[qid]);
 }

 static_always_inline void
@@ -187,7 +187,7 @@ dns_cache_lock (dns_main_t * dm)
 {
 if (dm->cache_lock)
 {
- while (__sync_lock_test_and_set (dm->cache_lock, 1))
+ while (clib_atomic_test_and_set (dm->cache_lock))
 ;
 }
 }
@@ -406,7 +406,7 @@ gre_interface_tx (vlib_main_t * vm,
 /* Encap GRE seq# and ERSPAN type II header */
 vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
 erspan_t2_t *h0 = vlib_buffer_get_current (b0);
- u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
+ u32 seq_num = clib_atomic_fetch_add (&gt0->gre_sn->seq_num, 1);
 u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
 h0->seq_num = clib_host_to_net_u32 (seq_num);
 h0->t2_u64 = hdr;
@@ -418,7 +418,7 @@ gre_interface_tx (vlib_main_t * vm,
 /* Encap GRE seq# and ERSPAN type II header */
 vlib_buffer_advance (b1, -sizeof (erspan_t2_t));
 erspan_t2_t *h1 = vlib_buffer_get_current (b1);
- u32 seq_num = clib_smp_atomic_add (&gt1->gre_sn->seq_num, 1);
+ u32 seq_num = clib_atomic_fetch_add (&gt1->gre_sn->seq_num, 1);
 u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
 h1->seq_num = clib_host_to_net_u32 (seq_num);
 h1->t2_u64 = hdr;
@@ -473,7 +473,7 @@ gre_interface_tx (vlib_main_t * vm,
 /* Encap GRE seq# and ERSPAN type II header */
 vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
 erspan_t2_t *h0 = vlib_buffer_get_current (b0);
- u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
+ u32 seq_num = clib_atomic_fetch_add (&gt0->gre_sn->seq_num, 1);
 u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
 h0->seq_num = clib_host_to_net_u32 (seq_num);
 h0->t2_u64 = hdr;
@@ -872,7 +872,7 @@ static inline void
 vnet_interface_counter_lock (vnet_interface_main_t * im)
 {
 if (im->sw_if_counter_lock)
- while (__sync_lock_test_and_set (im->sw_if_counter_lock, 1))
+ while (clib_atomic_test_and_set (im->sw_if_counter_lock))
 /* zzzz */ ;
 }

@@ -880,7 +880,7 @@ static inline void
 vnet_interface_counter_unlock (vnet_interface_main_t * im)
 {
 if (im->sw_if_counter_lock)
- *im->sw_if_counter_lock = 0;
+ clib_atomic_release (im->sw_if_counter_lock);
 }

 void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add);
@@ -254,7 +254,7 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
 else if (new_leaf_dst_address_bits >=
 ply->dst_address_bits_of_leaves[i])
 {
- __sync_val_compare_and_swap (&ply->leaves[i], old_leaf, new_leaf);
+ clib_atomic_cmp_and_swap (&ply->leaves[i], old_leaf, new_leaf);
 ASSERT (ply->leaves[i] == new_leaf);
 ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
 ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
@@ -319,8 +319,8 @@ set_leaf (ip4_fib_mtrie_t * m,

 old_ply->dst_address_bits_of_leaves[i] =
 a->dst_address_length;
- __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf,
- new_leaf);
+ clib_atomic_cmp_and_swap (&old_ply->leaves[i], old_leaf,
+ new_leaf);
 ASSERT (old_ply->leaves[i] == new_leaf);

 old_ply->n_non_empty_leafs +=
@@ -378,8 +378,8 @@ set_leaf (ip4_fib_mtrie_t * m,
 /* Refetch since ply_create may move pool. */
 old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);

- __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
- new_leaf);
+ clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+ new_leaf);
 ASSERT (old_ply->leaves[dst_byte] == new_leaf);
 old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;

@@ -451,8 +451,8 @@ set_root_leaf (ip4_fib_mtrie_t * m,
 * the new one */
 old_ply->dst_address_bits_of_leaves[slot] =
 a->dst_address_length;
- __sync_val_compare_and_swap (&old_ply->leaves[slot],
- old_leaf, new_leaf);
+ clib_atomic_cmp_and_swap (&old_ply->leaves[slot],
+ old_leaf, new_leaf);
 ASSERT (old_ply->leaves[slot] == new_leaf);
 }
 else
@@ -498,8 +498,8 @@ set_root_leaf (ip4_fib_mtrie_t * m,
 ply_base_len);
 new_ply = get_next_ply_for_leaf (m, new_leaf);

- __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
- new_leaf);
+ clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+ new_leaf);
 ASSERT (old_ply->leaves[dst_byte] == new_leaf);
 old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
 }
@@ -197,7 +197,7 @@ ipfix_classify_send_flows (flow_report_main_t * frm,

 t = pool_elt_at_index (vcm->tables, table->classify_table_index);

- while (__sync_lock_test_and_set (t->writer_lock, 1))
+ while (clib_atomic_test_and_set (t->writer_lock))
 ;

 for (i = 0; i < t->nbuckets; i++)
@@ -385,7 +385,7 @@ flush:
 bi0 = ~0;
 }

- *(t->writer_lock) = 0;
+ clib_atomic_release (t->writer_lock);
 return f;
 }
@@ -300,8 +300,8 @@ mfib_forward_itf_signal (vlib_main_t *vm,
 {
 mfib_itf_flags_t old_flags;

- old_flags = __sync_fetch_and_or(&mfi->mfi_flags,
- MFIB_ITF_FLAG_SIGNAL_PRESENT);
+ old_flags = clib_atomic_fetch_or(&mfi->mfi_flags,
+ MFIB_ITF_FLAG_SIGNAL_PRESENT);

 if (!(old_flags & MFIB_ITF_FLAG_SIGNAL_PRESENT))
 {
@@ -71,14 +71,14 @@ mfib_signal_module_init (void)
 static inline void
 mfib_signal_lock_aquire (void)
 {
- while (__sync_lock_test_and_set (&mfib_signal_pending.mip_lock, 1))
+ while (clib_atomic_test_and_set (&mfib_signal_pending.mip_lock))
 ;
 }

 static inline void
 mfib_signal_lock_release (void)
 {
- mfib_signal_pending.mip_lock = 0;
+ clib_atomic_release(&mfib_signal_pending.mip_lock);
 }

 #define MFIB_SIGNAL_CRITICAL_SECTION(_body) \
@@ -117,8 +117,8 @@ mfib_signal_send_one (struct vl_api_registration_ *reg,
 mfs = pool_elt_at_index(mfib_signal_pool, si);
 mfi = mfib_itf_get(mfs->mfs_itf);
 mfi->mfi_si = INDEX_INVALID;
- __sync_fetch_and_and(&mfi->mfi_flags,
- ~MFIB_ITF_FLAG_SIGNAL_PRESENT);
+ clib_atomic_fetch_and(&mfi->mfi_flags,
+ ~MFIB_ITF_FLAG_SIGNAL_PRESENT);

 vl_mfib_signal_send_one(reg, context, mfs);
@@ -54,7 +54,7 @@ pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 pg_interface_t *pif = pool_elt_at_index (pg->interfaces, rd->dev_instance);

 if (PREDICT_FALSE (pif->lockp != 0))
- while (__sync_lock_test_and_set (pif->lockp, 1))
+ while (clib_atomic_test_and_set (pif->lockp))
 ;

 while (n_left > 0)
@@ -82,7 +82,8 @@ pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)

 vlib_buffer_free (vm, vlib_frame_args (frame), n_buffers);
 if (PREDICT_FALSE (pif->lockp != 0))
- *pif->lockp = 0;
+ clib_atomic_release (pif->lockp);
+
 return n_buffers;
 }
@@ -263,8 +263,8 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 {
 stream_session_t *s;

- __sync_fetch_and_add (&ecm->tx_total, sp->bytes_sent);
- __sync_fetch_and_add (&ecm->rx_total, sp->bytes_received);
+ clib_atomic_fetch_add (&ecm->tx_total, sp->bytes_sent);
+ clib_atomic_fetch_add (&ecm->rx_total, sp->bytes_received);
 s = session_get_from_handle_if_valid (sp->vpp_session_handle);

 if (s)
@@ -276,7 +276,7 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,

 vec_delete (connections_this_batch, 1, i);
 i--;
- __sync_fetch_and_add (&ecm->ready_connections, -1);
+ clib_atomic_fetch_add (&ecm->ready_connections, -1);
 }
 else
 {
@@ -408,7 +408,7 @@ echo_clients_session_connected_callback (u32 app_index, u32 api_context,
 }

 vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
- __sync_fetch_and_add (&ecm->ready_connections, 1);
+ clib_atomic_fetch_add (&ecm->ready_connections, 1);
 if (ecm->ready_connections == ecm->expected_connections)
 {
 ecm->run_test = ECHO_CLIENTS_RUNNING;
@@ -52,14 +52,14 @@ typedef struct {
 static_always_inline
 void vlib_refcount_lock (volatile u32 *counter_lock)
 {
- while (__sync_lock_test_and_set (counter_lock, 1))
+ while (clib_atomic_test_and_set (counter_lock))
 ;
 }

 static_always_inline
 void vlib_refcount_unlock (volatile u32 *counter_lock)
 {
- *counter_lock = 0;
+ clib_atomic_release(counter_lock);
 }

 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
Some files were not shown because too many files have changed in this diff.