Clean up multi-thread barrier-sync hold-down timer

Main thread: don't bother with the barrier sync hold-down timer if
none of the worker threads are busy.

Worker threads: avoid epoll_pwait (10ms timeout) when the
control-plane has been active in the last half-second.

Cherry-pick a recent dangling-reference fix: re-fetch the file with
pool_elt_at_index after e.g. an rx callback, since the unix file pool
may expand and invalidate the previously held pointer.

DO NOT cherry-pick the patch into master!

Change-Id: I130dc72fd95c8a22716fa2f0f98ce35cdccc52d7
Signed-off-by: Dave Barach <dave@barachs.net>
This commit is contained in:
Dave Barach 2019-04-23 15:13:14 -04:00 committed by Dave Wallace
parent 15c31921a6
commit e1062522d5
4 changed files with 130 additions and 16 deletions

View File

@ -63,6 +63,9 @@ typedef struct vlib_main_t
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Instruction level timing state. */
clib_time_t clib_time;
/* Offset from main thread time */
f64 time_offset;
f64 time_last_barrier_release;
/* Time stamp of last node dispatch. */
u64 cpu_time_last_node_dispatch;
@ -228,7 +231,7 @@ void vlib_worker_loop (vlib_main_t * vm);
/* Return the current time as seen by this thread.
 * Worker threads carry vm->time_offset (recomputed at each barrier
 * release, see vlib_worker_thread_barrier_check) so that all threads
 * report approximately the main thread's clock.  On the main thread
 * time_offset is 0.0, so this reduces to clib_time_now(). */
always_inline f64
vlib_time_now (vlib_main_t * vm)
{
  /* Diff residue fixed: the pre-image "return clib_time_now (...)"
     (without the offset) was left in as unreachable dead code; only
     the post-image statement belongs here. */
  return clib_time_now (&vm->clib_time) + vm->time_offset;
}
always_inline f64

View File

@ -1367,7 +1367,9 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
f64 t_entry;
f64 t_open;
f64 t_closed;
f64 max_vector_rate, this_vector_rate;
u32 count;
int i;
if (vec_len (vlib_mains) < 2)
return;
@ -1388,23 +1390,44 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
return;
}
/*
* Need data to decide if we're working hard enough to honor
* the barrier hold-down timer.
*/
max_vector_rate = 0.0;
for (i = 1; i < vec_len (vlib_mains); i++)
{
this_vector_rate =
vlib_last_vectors_per_main_loop_as_f64 (vlib_mains[i]);
if (max_vector_rate < this_vector_rate)
max_vector_rate = this_vector_rate;
}
vlib_worker_threads[0].barrier_sync_count++;
/* Enforce minimum barrier open time to minimize packet loss */
ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
while (1)
/*
* If any worker thread seems busy, which we define
* as a vector rate above 10, we enforce the barrier hold-down timer
*/
if (max_vector_rate > 10.0)
{
now = vlib_time_now (vm);
/* Barrier hold-down timer expired? */
if (now >= vm->barrier_no_close_before)
break;
if ((vm->barrier_no_close_before - now)
> (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
while (1)
{
clib_warning ("clock change: would have waited for %.4f seconds",
(vm->barrier_no_close_before - now));
break;
now = vlib_time_now (vm);
/* Barrier hold-down timer expired? */
if (now >= vm->barrier_no_close_before)
break;
if ((vm->barrier_no_close_before - now)
> (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
{
clib_warning
("clock change: would have waited for %.4f seconds",
(vm->barrier_no_close_before - now));
break;
}
}
}
/* Record time of closure */
@ -1491,6 +1514,14 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
deadline = now + BARRIER_SYNC_TIMEOUT;
/*
* Note when we let go of the barrier.
* Workers can use this to derive a reasonably accurate
* time offset. See vlib_time_now(...)
*/
vm->time_last_barrier_release = vlib_time_now (vm);
CLIB_MEMORY_STORE_BARRIER ();
*vlib_worker_threads->wait_at_barrier = 0;
while (*vlib_worker_threads->workers_at_barrier > 0)
@ -1789,6 +1820,45 @@ threads_init (vlib_main_t * vm)
VLIB_INIT_FUNCTION (threads_init);
/* CLI handler for "show clock": print the main-thread time, the time
 * stamp of the last barrier release, and for each worker thread its
 * clock offset plus the residual error (main thread's barrier-release
 * stamp minus the worker's own stamp of the same event). */
static clib_error_t *
show_clock_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
int i;
f64 now;
now = vlib_time_now (vm);
vlib_cli_output (vm, "Time now %.9f", now);
/* Single-threaded image: no per-worker offsets to report */
if (vec_len (vlib_mains) == 1)
return 0;
vlib_cli_output (vm, "Time last barrier release %.9f",
vm->time_last_barrier_release);
for (i = 1; i < vec_len (vlib_mains); i++)
{
/* Worker slot may be unpopulated; skip it */
if (vlib_mains[i] == 0)
continue;
/* "error" gauges how well the worker's offset estimate tracks the
   main thread: both sides stamped the same barrier-release event */
vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i,
vlib_mains[i]->time_offset,
vm->time_last_barrier_release -
vlib_mains[i]->time_last_barrier_release);
}
return 0;
}
/* *INDENT-OFF* */
/* Register the "show clock" debug CLI command (see show_clock_command_fn) */
VLIB_CLI_COMMAND (f_command, static) =
{
.path = "show clock",
.short_help = "show clock",
.function = show_clock_command_fn,
};
/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
*

View File

@ -423,6 +423,21 @@ vlib_worker_thread_barrier_check (void)
}
while (*vlib_worker_threads->wait_at_barrier)
;
/*
* Recompute the offset from thread-0 time.
* Note that vlib_time_now adds vm->time_offset, so
* clear it first. Save the resulting idea of "now", to
* see how well we're doing. See show_clock_command_fn(...)
*/
{
f64 now;
vm->time_offset = 0.0;
now = vlib_time_now (vm);
vm->time_offset = vlib_global_main.time_last_barrier_release - now;
vm->time_last_barrier_release = vlib_time_now (vm);
}
if (CLIB_DEBUG > 0)
vm->parked_at_barrier = 0;
clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);

View File

@ -145,9 +145,13 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_node_main_t *nm = &vm->node_main;
u32 ticks_until_expiration;
f64 timeout;
f64 now;
int timeout_ms = 0, max_timeout_ms = 10;
f64 vector_rate = vlib_last_vectors_per_main_loop (vm);
if (is_main == 0)
now = vlib_time_now (vm);
/*
* If we've been asked for a fixed-sleep between main loop polls,
* do so right away.
@ -194,8 +198,9 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
}
node->input_main_loops_per_call = 0;
}
else if (is_main == 0 && vector_rate < 2 &&
nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
else if (is_main == 0 && vector_rate < 2
&& (vlib_global_main.time_last_barrier_release + 0.5 < now)
&& nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] == 0)
{
timeout = 10e-3;
timeout_ms = max_timeout_ms;
@ -223,12 +228,32 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
em->epoll_events,
vec_len (em->epoll_events), timeout_ms);
}
}
else
{
/*
* Worker thread, no epoll fd's, sleep for 100us at a time
* and check for a barrier sync request
*/
if (timeout_ms)
usleep (timeout_ms * 1000);
return 0;
{
struct timespec ts, tsrem;
f64 limit = now + (f64) timeout_ms * 1e-3;
while (vlib_time_now (vm) < limit)
{
/* Sleep for 100us at a time */
ts.tv_sec = 0;
ts.tv_nsec = 1000 * 100;
while (nanosleep (&ts, &tsrem) < 0)
ts = tsrem;
if (*vlib_worker_threads->wait_at_barrier)
goto done;
}
}
goto done;
}
}
@ -238,7 +263,7 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_panic_with_error (vm, clib_error_return_unix (0, "epoll_wait"));
/* non fatal error (e.g. EINTR). */
return 0;
goto done;
}
em->epoll_waits += 1;
@ -318,6 +343,7 @@ linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
}
}
done:
return 0;
}