vlib: introduce vlib_get_main_by_index(), vlib_get_n_threads()

Type: improvement
Change-Id: If3da7d4338470912f37ff1794620418d928fb77f
Signed-off-by: Damjan Marion <damarion@cisco.com>
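The new accessors are thin wrappers around the existing vlib_mains vector: vlib_get_n_threads () returns vec_len (vlib_mains), and vlib_get_main_by_index (i) returns vlib_mains[i] (with an ASSERT), as added in the vlib header hunk below. A minimal sketch of the per-thread iteration pattern callers are converted to follows; the helper name and loop body are illustrative only, not part of this patch:

    #include <vlib/vlib.h>

    /* Illustrative helper (not in this patch): visit every per-thread
     * vlib_main_t through the new accessors instead of indexing
     * vlib_mains[] directly. */
    static void
    visit_all_threads (void)
    {
      u32 i;

      for (i = 0; i < vlib_get_n_threads (); i++)
        {
          vlib_main_t *ovm = vlib_get_main_by_index (i);
          if (ovm == 0)
            continue;
          /* per-thread work, e.g. read ovm->numa_node or set node state */
        }
    }

The explicit 0-check mirrors the converted call sites that still guard against a per-thread main that has not come up yet.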
@@ -418,7 +418,7 @@ acl_fa_inner_node_fn (vlib_main_t * vm,
  {
  trace_bitmap |= 0x80000000;
  }
- ASSERT (f_sess_id.thread_index < vec_len (vlib_mains));
+ ASSERT (f_sess_id.thread_index < vlib_get_n_threads ());
  b[0]->error = no_error_existing_session;
  acl_check_needed = 0;
  pkts_exist_session += 1;

@@ -361,8 +361,9 @@ send_one_worker_interrupt (vlib_main_t * vm, acl_main_t * am,
  if (!pw->interrupt_is_pending)
  {
  pw->interrupt_is_pending = 1;
- vlib_node_set_interrupt_pending (vlib_mains[thread_index],
- acl_fa_worker_session_cleaner_process_node.index);
+ vlib_node_set_interrupt_pending (
+ vlib_get_main_by_index (thread_index),
+ acl_fa_worker_session_cleaner_process_node.index);
  elog_acl_maybe_trace_X1 (am,
  "send_one_worker_interrupt: send interrupt to worker %u",
  "i4", ((u32) thread_index));

@@ -560,7 +561,7 @@ send_interrupts_to_workers (vlib_main_t * vm, acl_main_t * am)
  {
  int i;
  /* Can't use vec_len(am->per_worker_data) since the threads might not have come up yet; */
- int n_threads = vec_len (vlib_mains);
+ int n_threads = vlib_get_n_threads ();
  for (i = 0; i < n_threads; i++)
  {
  send_one_worker_interrupt (vm, am, i);

@@ -600,7 +601,7 @@ acl_fa_session_cleaner_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
  *
  * Also, while we are at it, calculate the earliest we need to wake up.
  */
- for (ti = 0; ti < vec_len (vlib_mains); ti++)
+ for (ti = 0; ti < vlib_get_n_threads (); ti++)
  {
  if (ti >= vec_len (am->per_worker_data))
  {

@@ -746,7 +747,7 @@ acl_fa_session_cleaner_process (vlib_main_t * vm, vlib_node_runtime_t * rt,

  /* now wait till they all complete */
  acl_log_info ("CLEANER mains len: %u per-worker len: %d",
- vec_len (vlib_mains),
+ vlib_get_n_threads (),
  vec_len (am->per_worker_data));
  vec_foreach (pw0, am->per_worker_data)
  {

@@ -466,7 +466,7 @@ acl_fa_can_add_session (acl_main_t * am, int is_input, u32 sw_if_index)
  {
  u64 curr_sess_count;
  curr_sess_count = am->fa_session_total_adds - am->fa_session_total_dels;
- return (curr_sess_count + vec_len (vlib_mains) <
+ return (curr_sess_count + vlib_get_n_threads () <
  am->fa_conn_table_max_entries);
  }

@@ -1735,7 +1735,7 @@ dpdk_cryptodev_init (vlib_main_t * vm)
  for (i = skip_master; i < tm->n_vlib_mains; i++)
  {
  ptd = cmt->per_thread_data + i;
- numa = vlib_mains[i]->numa_node;
+ numa = vlib_get_main_by_index (i)->numa_node;

  ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
  CRYPTODEV_MAX_AAD_SIZE,
@@ -1041,13 +1041,13 @@ timer_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
  vec_reset_length (event_data);

  int i;
- if (vec_len (vlib_mains) == 0)
+ if (vlib_get_n_threads () == 0)
  vec_add1 (worker_vms, vm);
  else
  {
- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- worker_vm = vlib_mains[i];
+ worker_vm = vlib_get_main_by_index (i);
  if (worker_vm)
  vec_add1 (worker_vms, worker_vm);
  }
@@ -940,7 +940,7 @@ echo_clients_command_fn (vlib_main_t * vm,

  /* Turn on the builtin client input nodes */
  for (i = 0; i < thread_main->n_vlib_mains; i++)
- vlib_node_set_state (vlib_mains[i], echo_clients_node.index,
+ vlib_node_set_state (vlib_get_main_by_index (i), echo_clients_node.index,
  VLIB_NODE_STATE_POLLING);

  if (preallocate_sessions)
@@ -209,7 +209,7 @@ static void
  http_process_free (http_server_args * args)
  {
  vlib_node_runtime_t *rt;
- vlib_main_t *vm = &vlib_global_main;
+ vlib_main_t *vm = vlib_get_first_main ();
  http_server_main_t *hsm = &http_server_main;
  vlib_node_t *n;
  u32 node_index;
@@ -147,20 +147,21 @@ mdata_enable_disable (mdata_main_t * mmp, int enable_disable)
  if (vec_len (mmp->before_per_thread) == 0)
  {
  mdata_none.node_index = ~0;
- vec_validate (mmp->before_per_thread, vec_len (vlib_mains) - 1);
+ vec_validate (mmp->before_per_thread, vlib_get_n_threads () - 1);
  }

  /* Reset the per-node accumulator, see vec_validate_init_empty above */
  vec_reset_length (mmp->modifies);

- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- if (vlib_mains[i] == 0)
+ vlib_main_t *ovm = vlib_get_main_by_index (i);
+ if (ovm == 0)
  continue;

- clib_callback_data_enable_disable
- (&vlib_mains[i]->vlib_node_runtime_perf_callbacks,
- mdata_trace_callback, enable_disable);
+ clib_callback_data_enable_disable (
+ &ovm->vlib_node_runtime_perf_callbacks, mdata_trace_callback,
+ enable_disable);
  }

  return rv;
@@ -413,7 +413,7 @@ VNET_DEVICE_CLASS_TX_FN (memif_device_class) (vlib_main_t * vm,
  thread_index);
  u8 tx_queues = vec_len (mif->tx_queues);

- if (tx_queues < vec_len (vlib_mains))
+ if (tx_queues < vlib_get_n_threads ())
  {
  ASSERT (tx_queues > 0);
  mq = vec_elt_at_index (mif->tx_queues, thread_index % tx_queues);
@@ -306,8 +306,8 @@ memif_connect (memif_if_t * mif)
  mq->int_clib_file_index);
  }
  ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi);
- mq->buffer_pool_index =
- vlib_buffer_pool_get_default_for_numa (vm, vlib_mains[ti]->numa_node);
+ mq->buffer_pool_index = vlib_buffer_pool_get_default_for_numa (
+ vm, vlib_get_main_by_index (ti)->numa_node);
  rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
  vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);

@@ -24,7 +24,7 @@
  #define MEMIF_DEFAULT_TX_QUEUES 1
  #define MEMIF_DEFAULT_BUFFER_SIZE 2048

- #define MEMIF_MAX_M2S_RING (vec_len (vlib_mains))
+ #define MEMIF_MAX_M2S_RING (vlib_get_n_threads ())
  #define MEMIF_MAX_S2M_RING 256
  #define MEMIF_MAX_REGION 256
  #define MEMIF_MAX_LOG2_RING_SIZE 14
@@ -1288,12 +1288,12 @@ nat_ipfix_flush_from_main (void)

  if (PREDICT_FALSE (!silm->worker_vms))
  {
- for (i = 1; i < vec_len (vlib_mains); i++)
- {
- worker_vm = vlib_mains[i];
- if (worker_vm)
- vec_add1 (silm->worker_vms, worker_vm);
- }
+ for (i = 1; i < vlib_get_n_threads (); i++)
+ {
+ worker_vm = vlib_get_main_by_index (i);
+ if (worker_vm)
+ vec_add1 (silm->worker_vms, worker_vm);
+ }
  }

  /* Trigger flush for each worker thread */
@@ -711,7 +711,7 @@ nat_ha_send (vlib_frame_t * f, vlib_buffer_t * b, u8 is_resync,
  nat_ha_message_header_t *h;
  ip4_header_t *ip;
  udp_header_t *udp;
- vlib_main_t *vm = vlib_mains[thread_index];
+ vlib_main_t *vm = vlib_get_main_by_index (thread_index);

  ip = vlib_buffer_get_current (b);
  udp = ip4_next_header (ip);

@@ -737,7 +737,7 @@ nat_ha_event_add (nat_ha_event_t * event, u8 do_flush, u32 thread_index,
  nat44_ei_main_t *nm = &nat44_ei_main;
  nat_ha_main_t *ha = &nat_ha_main;
  nat_ha_per_thread_data_t *td = &ha->per_thread_data[thread_index];
- vlib_main_t *vm = vlib_mains[thread_index];
+ vlib_main_t *vm = vlib_get_main_by_index (thread_index);
  vlib_buffer_t *b = 0;
  vlib_frame_t *f;
  u32 bi = ~0, offset;
@@ -967,12 +967,12 @@ nat_ha_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
  vlib_process_wait_for_event_or_clock (vm, 1.0);
  event_type = vlib_process_get_events (vm, &event_data);
  vec_reset_length (event_data);
- for (ti = 0; ti < vec_len (vlib_mains); ti++)
+ for (ti = 0; ti < vlib_get_n_threads (); ti++)
  {
  if (ti >= vec_len (ha->per_thread_data))
  continue;

- vlib_node_set_interrupt_pending (vlib_mains[ti],
+ vlib_node_set_interrupt_pending (vlib_get_main_by_index (ti),
  nat_ha_worker_node.index);
  }
  }
@@ -998,7 +998,7 @@ nat64_add_del_static_bib_entry (ip6_address_t * in_addr,
  static_bib->is_add = is_add;
  static_bib->thread_index = thread_index;
  static_bib->done = 0;
- worker_vm = vlib_mains[thread_index];
+ worker_vm = vlib_get_main_by_index (thread_index);
  if (worker_vm)
  vlib_node_set_interrupt_pending (worker_vm,
  nat64_static_bib_worker_node.index);
@@ -1452,13 +1452,13 @@ nat64_expire_walk_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
  int i;
  uword event_type, *event_data = 0;

- if (vec_len (vlib_mains) == 0)
+ if (vlib_get_n_threads () == 0)
  vec_add1 (worker_vms, vm);
  else
  {
- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- worker_vm = vlib_mains[i];
+ worker_vm = vlib_get_main_by_index (i);
  if (worker_vm)
  vec_add1 (worker_vms, worker_vm);
  }
@@ -347,7 +347,7 @@ extern vlib_node_registration_t pnat_input_node;
  static void test_table(test_t *t, int no_tests) {
  // walk through table of tests
  int i;
- vlib_main_t *vm = &vlib_global_main;
+ vlib_main_t *vm = vlib_get_first_main();

  /* Generate packet data */
  for (i = 0; i < no_tests; i++) {

@@ -376,7 +376,7 @@ static void test_table(test_t *t, int no_tests) {
  void test_performance(void) {
  pnat_main_t *pm = &pnat_main;
  int i;
- vlib_main_t *vm = &vlib_global_main;
+ vlib_main_t *vm = vlib_get_first_main();

  for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
  add_translation(&rules[i]);

@@ -505,7 +505,7 @@ void test_api(void) {

  void test_checksum(void) {
  int i;
- vlib_main_t *vm = &vlib_global_main;
+ vlib_main_t *vm = vlib_get_first_main();
  pnat_main_t *pm = &pnat_main;

  test_t test = {

@@ -559,7 +559,7 @@ int main(int argc, char **argv) {

  clib_mem_init(0, 3ULL << 30);

- vlib_main_t *vm = &vlib_global_main;
+ vlib_main_t *vm = vlib_get_first_main();

  buffers_vector = buffer_init(buffers_vector, 256);

@@ -215,7 +215,7 @@ nsim_configure (nsim_main_t * nsm, f64 bandwidth, f64 delay, f64 packet_size,
  i = (!nsm->poll_main_thread && num_workers) ? 1 : 0;
  for (; i < num_workers + 1; i++)
  {
- vlib_main_t *this_vm = vlib_mains[i];
+ vlib_main_t *this_vm = vlib_get_main_by_index (i);

  vlib_node_set_state (this_vm, nsim_input_node.index,
  VLIB_NODE_STATE_POLLING);
@@ -48,8 +48,8 @@ perfmon_reset (vlib_main_t *vm)
  uword page_size = clib_mem_get_page_size ();

  if (pm->is_running)
- for (int i = 0; i < vec_len (vlib_mains); i++)
- vlib_node_set_dispatch_wrapper (vlib_mains[i], 0);
+ for (int i = 0; i < vlib_get_n_threads (); i++)
+ vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i), 0);

  for (int i = 0; i < vec_len (pm->fds_to_close); i++)
  close (pm->fds_to_close[i]);

@@ -104,7 +104,7 @@ perfmon_set (vlib_main_t *vm, perfmon_bundle_t *b)
  {
  vec_add2 (pm->default_instance_type, it, 1);
  it->name = is_node ? "Thread/Node" : "Thread";
- for (int i = 0; i < vec_len (vlib_mains); i++)
+ for (int i = 0; i < vlib_get_n_threads (); i++)
  {
  vlib_worker_thread_t *w = vlib_worker_threads + i;
  perfmon_instance_t *in;

@@ -114,7 +114,7 @@ perfmon_set (vlib_main_t *vm, perfmon_bundle_t *b)
  in->name = (char *) format (0, "%s (%u)%c", w->name, i, 0);
  }
  if (is_node)
- vec_validate (pm->thread_runtimes, vec_len (vlib_mains) - 1);
+ vec_validate (pm->thread_runtimes, vlib_get_n_threads () - 1);
  }
  else
  {

@@ -234,8 +234,8 @@ perfmon_start (vlib_main_t *vm)
  }
  if (pm->active_bundle->type == PERFMON_BUNDLE_TYPE_NODE)
  {
- for (int i = 0; i < vec_len (vlib_mains); i++)
- vlib_node_set_dispatch_wrapper (vlib_mains[i],
+ for (int i = 0; i < vlib_get_n_threads (); i++)
+ vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i),
  perfmon_dispatch_wrapper);
  }
  pm->sample_time = vlib_time_now (vm);

@@ -254,8 +254,8 @@ perfmon_stop (vlib_main_t *vm)

  if (pm->active_bundle->type == PERFMON_BUNDLE_TYPE_NODE)
  {
- for (int i = 0; i < vec_len (vlib_mains); i++)
- vlib_node_set_dispatch_wrapper (vlib_mains[i], 0);
+ for (int i = 0; i < vlib_get_n_threads (); i++)
+ vlib_node_set_dispatch_wrapper (vlib_get_main_by_index (i), 0);
  }

  for (int i = 0; i < n_groups; i++)
@@ -224,10 +224,10 @@ vl_api_trace_dump_t_handler (vl_api_trace_dump_t * mp)
  if (vec_len (client_trace_cache) == 0
  && (iterator_thread_id != ~0 || iterator_position != ~0))
  {
- vlib_worker_thread_barrier_sync (&vlib_global_main);
+ vlib_worker_thread_barrier_sync (vlib_get_first_main ());

  /* Make a slot for each worker thread */
- vec_validate (client_trace_cache, vec_len (vlib_mains) - 1);
+ vec_validate (client_trace_cache, vlib_get_n_threads () - 1);
  i = 0;

  /* *INDENT-OFF* */

@@ -250,7 +250,7 @@ vl_api_trace_dump_t_handler (vl_api_trace_dump_t * mp)
  i++;
  }));
  /* *INDENT-ON* */
- vlib_worker_thread_barrier_release (&vlib_global_main);
+ vlib_worker_thread_barrier_release (vlib_get_first_main ());
  }

  /* Save the cache, one way or the other */

@@ -268,7 +268,8 @@ vl_api_trace_dump_t_handler (vl_api_trace_dump_t * mp)

  vec_reset_length (s);

- s = format (s, "%U", format_vlib_trace, &vlib_global_main, th[0]);
+ s =
+ format (s, "%U", format_vlib_trace, vlib_get_first_main (), th[0]);

  dmp = vl_msg_api_alloc (sizeof (*dmp) + vec_len (s));
  dmp->_vl_msg_id =
@@ -569,7 +569,7 @@ vlib_buffer_pool_create (vlib_main_t * vm, char *name, u32 data_size,
  bp->data_size = data_size;
  bp->numa_node = m->numa_node;

- vec_validate_aligned (bp->threads, vec_len (vlib_mains) - 1,
+ vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
  CLIB_CACHE_LINE_BYTES);

  alloc_size = vlib_buffer_alloc_size (bm->ext_hdr_size, data_size);

@@ -673,7 +673,7 @@ vlib_buffer_worker_init (vlib_main_t * vm)
  vec_foreach (bp, bm->buffer_pools)
  {
  clib_spinlock_lock (&bp->lock);
- vec_validate_aligned (bp->threads, vec_len (vlib_mains) - 1,
+ vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
  CLIB_CACHE_LINE_BYTES);
  clib_spinlock_unlock (&bp->lock);
  }

@@ -545,7 +545,8 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
  {
  hf->n_vectors = VLIB_FRAME_SIZE;
  vlib_put_frame_queue_elt (hf);
- vlib_mains[current_thread_index]->check_frame_queues = 1;
+ vlib_get_main_by_index (current_thread_index)->check_frame_queues =
+ 1;
  current_thread_index = ~0;
  ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
  hf = 0;

@@ -574,7 +575,7 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
  if (1 || hf->n_vectors == hf->last_n_vectors)
  {
  vlib_put_frame_queue_elt (hf);
- vlib_mains[i]->check_frame_queues = 1;
+ vlib_get_main_by_index (i)->check_frame_queues = 1;
  ptd->handoff_queue_elt_by_thread_index[i] = 0;
  }
  else
@@ -19,13 +19,31 @@
  #ifndef included_vlib_global_funcs_h_
  #define included_vlib_global_funcs_h_

+ always_inline u32
+ vlib_get_n_threads ()
+ {
+ return vec_len (vlib_mains);
+ }
+
+ always_inline vlib_main_t *
+ vlib_get_main_by_index (u32 thread_index)
+ {
+ vlib_main_t *vm;
+ vm = vlib_mains[thread_index];
+ ASSERT (vm);
+ return vm;
+ }
+
  always_inline vlib_main_t *
  vlib_get_main (void)
  {
- vlib_main_t *vm;
- vm = vlib_mains[vlib_get_thread_index ()];
- ASSERT (vm);
- return vm;
+ return vlib_get_main_by_index (vlib_get_thread_index ());
  }

+ always_inline vlib_main_t *
+ vlib_get_first_main (void)
+ {
+ return vlib_get_main_by_index (0);
+ }
+
  always_inline vlib_thread_main_t *
@@ -613,9 +613,9 @@ vlib_node_get_nodes (vlib_main_t * vm, u32 max_threads, int include_stats,

  if (vec_len (stat_vms) == 0)
  {
- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- stat_vm = vlib_mains[i];
+ stat_vm = vlib_get_main_by_index (i);
  if (stat_vm)
  vec_add1 (stat_vms, stat_vm);
  }

@@ -837,10 +837,11 @@ vlib_node_set_march_variant (vlib_main_t *vm, u32 node_index,
  {
  n->function = fnr->function;

- for (int i = 0; i < vec_len (vlib_mains); i++)
+ for (int i = 0; i < vlib_get_n_threads (); i++)
  {
  vlib_node_runtime_t *nrt;
- nrt = vlib_node_get_runtime (vlib_mains[i], n->index);
+ nrt =
+ vlib_node_get_runtime (vlib_get_main_by_index (i), n->index);
  nrt->function = fnr->function;
  }
  return 0;
@@ -210,14 +210,14 @@ show_node_graphviz (vlib_main_t * vm,
  /* Updating the stats for multithreaded use cases.
  * We need to dup the nodes to sum the stats from all threads.*/
  nodes = vec_dup (nm->nodes);
- for (i = 1; i < vec_len (vlib_mains); i++)
+ for (i = 1; i < vlib_get_n_threads (); i++)
  {
  vlib_node_main_t *nm_clone;
  vlib_main_t *vm_clone;
  vlib_node_runtime_t *rt;
  vlib_node_t *n;

- vm_clone = vlib_mains[i];
+ vm_clone = vlib_get_main_by_index (i);
  nm_clone = &vm_clone->node_main;

  for (j = 0; j < vec_len (nm_clone->nodes); j++)

@@ -516,9 +516,9 @@ show_node_runtime (vlib_main_t * vm,
  || unformat (input, "su"))
  summary = 1;

- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- stat_vm = vlib_mains[i];
+ stat_vm = vlib_get_main_by_index (i);
  if (stat_vm)
  vec_add1 (stat_vms, stat_vm);
  }

@@ -592,7 +592,7 @@ show_node_runtime (vlib_main_t * vm,
  }
  }

- if (vec_len (vlib_mains) > 1)
+ if (vlib_get_n_threads () > 1)
  {
  vlib_worker_thread_t *w = vlib_worker_threads + j;
  if (j > 0)

@@ -665,9 +665,9 @@ clear_node_runtime (vlib_main_t * vm,
  vlib_main_t **stat_vms = 0, *stat_vm;
  vlib_node_runtime_t *r;

- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- stat_vm = vlib_mains[i];
+ stat_vm = vlib_get_main_by_index (i);
  if (stat_vm)
  vec_add1 (stat_vms, stat_vm);
  }

@@ -848,10 +848,10 @@ show_node (vlib_main_t * vm, unformat_input_t * input,

  s = format (s, "\n%8s %=12s %=12s %=12s %=12s %=12s\n", "Thread", "Calls",
  "Clocks", "Vectors", "Max Clock", "Max Vectors");
- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- n = vlib_get_node (vlib_mains[i], node_index);
- vlib_node_sync_stats (vlib_mains[i], n);
+ n = vlib_get_node (vlib_get_main_by_index (i), node_index);
+ vlib_node_sync_stats (vlib_get_main_by_index (i), n);

  cl = n->stats_total.clocks - n->stats_last_clear.clocks;
  ca = n->stats_total.calls - n->stats_last_clear.calls;
@@ -574,7 +574,8 @@ vlib_worker_thread_bootstrap_fn (void *arg)

  __os_thread_index = w - vlib_worker_threads;

- vlib_process_start_switch_stack (vlib_mains[__os_thread_index], 0);
+ vlib_process_start_switch_stack (vlib_get_main_by_index (__os_thread_index),
+ 0);
  rv = (void *) clib_calljmp
  ((uword (*)(uword)) w->thread_function,
  (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);

@@ -1001,7 +1002,7 @@ worker_thread_node_runtime_update_internal (void)

  ASSERT (vlib_get_thread_index () == 0);

- vm = vlib_mains[0];
+ vm = vlib_get_first_main ();
  nm = &vm->node_main;

  ASSERT (*vlib_worker_threads->wait_at_barrier == 1);

@@ -1017,11 +1018,11 @@ worker_thread_node_runtime_update_internal (void)
  vlib_node_sync_stats (vm, n);
  }

- for (i = 1; i < vec_len (vlib_mains); i++)
+ for (i = 1; i < vlib_get_n_threads (); i++)
  {
  vlib_node_t *n;

- vm_clone = vlib_mains[i];
+ vm_clone = vlib_get_main_by_index (i);
  nm_clone = &vm_clone->node_main;

  for (j = 0; j < vec_len (nm_clone->nodes); j++)

@@ -1049,7 +1050,7 @@ vlib_worker_thread_node_refork (void)

  int j;

- vm = vlib_mains[0];
+ vm = vlib_get_first_main ();
  nm = &vm->node_main;
  vm_clone = vlib_get_main ();
  nm_clone = &vm_clone->node_main;

@@ -1425,7 +1426,7 @@ vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
  {
  f64 deadline;
  f64 now = vlib_time_now (vm);
- u32 count = vec_len (vlib_mains) - 1;
+ u32 count = vlib_get_n_threads () - 1;

  /* No worker threads? */
  if (count == 0)

@@ -1451,7 +1452,7 @@ vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
  u8
  vlib_worker_thread_barrier_held (void)
  {
- if (vec_len (vlib_mains) < 2)
+ if (vlib_get_n_threads () < 2)
  return (1);

  return (*vlib_worker_threads->wait_at_barrier == 1);

@@ -1469,13 +1470,13 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
  u32 count;
  int i;

- if (vec_len (vlib_mains) < 2)
+ if (vlib_get_n_threads () < 2)
  return;

  ASSERT (vlib_get_thread_index () == 0);

  vlib_worker_threads[0].barrier_caller = func_name;
- count = vec_len (vlib_mains) - 1;
+ count = vlib_get_n_threads () - 1;

  /* Record entry relative to last close */
  now = vlib_time_now (vm);

@@ -1497,10 +1498,12 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
  * the barrier hold-down timer.
  */
  max_vector_rate = 0.0;
- for (i = 1; i < vec_len (vlib_mains); i++)
- max_vector_rate =
- clib_max (max_vector_rate,
- (f64) vlib_last_vectors_per_main_loop (vlib_mains[i]));
+ for (i = 1; i < vlib_get_n_threads (); i++)
+ {
+ vlib_main_t *ovm = vlib_get_main_by_index (i);
+ max_vector_rate = clib_max (max_vector_rate,
+ (f64) vlib_last_vectors_per_main_loop (ovm));
+ }

  vlib_worker_threads[0].barrier_sync_count++;

@@ -1562,7 +1565,7 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
  f64 t_update_main = 0.0;
  int refork_needed = 0;

- if (vec_len (vlib_mains) < 2)
+ if (vlib_get_n_threads () < 2)
  return;

  ASSERT (vlib_get_thread_index () == 0);

@@ -1594,7 +1597,7 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
  /* Do per thread rebuilds in parallel */
  refork_needed = 1;
  clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
- (vec_len (vlib_mains) - 1));
+ (vlib_get_n_threads () - 1));
  now = vlib_time_now (vm);
  t_update_main = now - vm->barrier_epoch;
  }

@@ -1668,7 +1671,7 @@ vlib_worker_wait_one_loop (void)
  {
  ASSERT (vlib_get_thread_index () == 0);

- if (vec_len (vlib_mains) < 2)
+ if (vlib_get_n_threads () < 2)
  return;

  if (vlib_worker_thread_barrier_held ())

@@ -1677,7 +1680,7 @@ vlib_worker_wait_one_loop (void)
  u32 *counts = 0;
  u32 ii;

- vec_validate (counts, vec_len (vlib_mains) - 1);
+ vec_validate (counts, vlib_get_n_threads () - 1);

  /* record the current loop counts */
  vec_foreach_index (ii, vlib_mains)

@@ -1973,24 +1976,24 @@ show_clock_command_fn (vlib_main_t * vm,
  verbose, format_clib_timebase_time,
  clib_timebase_now (tb));

- if (vec_len (vlib_mains) == 1)
+ if (vlib_get_n_threads () == 1)
  return 0;

  vlib_cli_output (vm, "Time last barrier release %.9f",
  vm->time_last_barrier_release);

- for (i = 1; i < vec_len (vlib_mains); i++)
+ for (i = 1; i < vlib_get_n_threads (); i++)
  {
- if (vlib_mains[i] == 0)
+ vlib_main_t *ovm = vlib_get_main_by_index (i);
+ if (ovm == 0)
  continue;

- vlib_cli_output (vm, "%d: %U", i, format_clib_time,
- &vlib_mains[i]->clib_time, verbose);
+ vlib_cli_output (vm, "%d: %U", i, format_clib_time, &ovm->clib_time,
+ verbose);

- vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i,
- vlib_mains[i]->time_offset,
- vm->time_last_barrier_release -
- vlib_mains[i]->time_last_barrier_release);
+ vlib_cli_output (
+ vm, "Thread %d offset %.9f error %.9f", i, ovm->time_offset,
+ vm->time_last_barrier_release - ovm->time_last_barrier_release);
  }
  return 0;
  }
@@ -521,7 +521,7 @@ vlib_get_worker_vlib_main (u32 worker_index)
  vlib_main_t *vm;
  vlib_thread_main_t *tm = &vlib_thread_main;
  ASSERT (worker_index < tm->n_vlib_mains - 1);
- vm = vlib_mains[worker_index + 1];
+ vm = vlib_get_main_by_index (worker_index + 1);
  ASSERT (vm);
  return vm;
  }
@@ -2886,9 +2886,9 @@ unix_cli_file_add (unix_cli_main_t * cm, char *name, int fd)
  * the same new name.
  * Then, throw away the old shared name-vector.
  */
- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
  {
- this_vlib_main = vlib_mains[i];
+ this_vlib_main = vlib_get_main_by_index (i);
  if (this_vlib_main == 0)
  continue;
  n = vlib_get_node (this_vlib_main,

@@ -198,9 +198,9 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
  }
  node->input_main_loops_per_call = 0;
  }
- else if (is_main == 0 && vector_rate < 2
- && (vlib_global_main.time_last_barrier_release + 0.5 < now)
- && nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
+ else if (is_main == 0 && vector_rate < 2 &&
+ (vlib_get_first_main ()->time_last_barrier_release + 0.5 < now) &&
+ nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
  {
  timeout = 10e-3;
  timeout_ms = max_timeout_ms;

@@ -692,7 +692,7 @@ vlib_thread_stack_init (uword thread_index)
  int
  vlib_unix_main (int argc, char *argv[])
  {
- vlib_main_t *vm = &vlib_global_main; /* one and only time for this! */
+ vlib_main_t *vm = vlib_get_first_main (); /* one and only time for this! */
  unformat_input_t input;
  clib_error_t *e;
  int i;

@@ -561,7 +561,7 @@ vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp)
  void
  vl_api_send_pending_rpc_requests (vlib_main_t * vm)
  {
- vlib_main_t *vm_global = &vlib_global_main;
+ vlib_main_t *vm_global = vlib_get_first_main ();

  ASSERT (vm != vm_global);

@@ -576,7 +576,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
  u8 force_rpc)
  {
  vl_api_rpc_call_t *mp;
- vlib_main_t *vm_global = &vlib_global_main;
+ vlib_main_t *vm_global = vlib_get_first_main ();
  vlib_main_t *vm = vlib_get_main ();

  /* Main thread and not a forced RPC: call the function directly */

@@ -331,8 +331,8 @@ show_crypto_async_status_command_fn (vlib_main_t * vm,

  for (i = skip_master; i < tm->n_vlib_mains; i++)
  {
- vlib_node_state_t state =
- vlib_node_get_state (vlib_mains[i], cm->crypto_node_index);
+ vlib_node_state_t state = vlib_node_get_state (
+ vlib_get_main_by_index (i), cm->crypto_node_index);
  if (state == VLIB_NODE_STATE_POLLING)
  vlib_cli_output (vm, "threadId: %-6d POLLING", i);
  if (state == VLIB_NODE_STATE_INTERRUPT)