session: reschedule asap when snd space constrained

Type: feature

Improves fairness for sessions that are snd space or pacer constrained.

Change-Id: Ida5f523090f1dcbfb17bf5116bc7917747ac8593
Signed-off-by: Florin Coras <fcoras@cisco.com>
Author: Florin Coras
Date:   2019-11-02 14:32:52 -07:00
Commit: dd97a48d9f (parent a1f5a956e5)

2 changed files with 28 additions and 7 deletions
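
The fairness claim in the commit message is easiest to see on a toy queue. The sketch below is not VPP code: it replaces the clib_llist-based old-events list with a plain array and made-up session indices, purely to illustrate why a session requeued at the tail waits behind every session that still has data to send, while one requeued at the head becomes the first candidate of the next dispatch pass.

/* Toy model (not VPP code): a worker's old-events queue holding session
 * indices.  requeue_tail () plays the role of session_evt_add_old (),
 * requeue_head () the role of the new session_evt_add_head_old (). */
#include <stdio.h>

#define QLEN 8

typedef struct
{
  int q[QLEN];
  int n;
} old_list_t;

static void
requeue_tail (old_list_t * l, int s)
{
  l->q[l->n++] = s;
}

static void
requeue_head (old_list_t * l, int s)
{
  for (int i = l->n; i > 0; i--)
    l->q[i] = l->q[i - 1];
  l->q[0] = s;
  l->n++;
}

int
main (void)
{
  old_list_t a = { {1, 2, 3}, 3 };
  old_list_t b = { {1, 2, 3}, 3 };

  /* Session 9 had no send space on this pass and must be requeued. */
  requeue_tail (&a, 9);
  requeue_head (&b, 9);

  printf ("tail requeue, next pass order:");
  for (int i = 0; i < a.n; i++)
    printf (" %d", a.q[i]);
  printf ("\nhead requeue, next pass order:");
  for (int i = 0; i < b.n; i++)
    printf (" %d", b.q[i]);
  printf ("\n");
  return 0;
}

Running it prints "tail requeue, next pass order: 1 2 3 9" versus "head requeue, next pass order: 9 1 2 3", which is the ordering difference the patch introduces for send-space constrained sessions.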


@@ -221,6 +221,14 @@ session_evt_add_old (session_worker_t * wrk, session_evt_elt_t * elt)
                   pool_elt_at_index (wrk->event_elts, wrk->old_head));
 }
 
+static inline void
+session_evt_add_head_old (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+  clib_llist_add (wrk->event_elts, evt_list, elt,
+                  pool_elt_at_index (wrk->event_elts, wrk->old_head));
+}
+
 static inline u32
 session_evt_ctrl_data_alloc (session_worker_t * wrk)
 {
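
For contrast, only the closing lines of the existing session_evt_add_old () helper are visible as context above. Judging from that context and from the clib_llist API (clib_llist_add inserts right after the list head, clib_llist_add_tail inserts before it, i.e. at the back), the existing helper presumably looks roughly like the reconstruction below; it is not part of this diff.

/* Presumed shape of the existing helper: append to the back of the
 * old-events list, so requeued sessions wait behind everything else. */
static inline void
session_evt_add_old (session_worker_t * wrk, session_evt_elt_t * elt)
{
  clib_llist_add_tail (wrk->event_elts, evt_list, elt,
                       pool_elt_at_index (wrk->event_elts, wrk->old_head));
}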


@@ -865,14 +865,22 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
     }
 
   ctx->snd_mss = ctx->transport_vft->send_mss (ctx->tc);
+  if (PREDICT_FALSE (ctx->snd_mss == 0))
+    {
+      session_evt_add_old (wrk, elt);
+      return SESSION_TX_NO_DATA;
+    }
+
   ctx->snd_space = transport_connection_snd_space (ctx->tc,
                                                     vm->clib_time.
                                                     last_cpu_time,
                                                     ctx->snd_mss);
 
-  if (ctx->snd_space == 0 || ctx->snd_mss == 0)
+  /* This flow queue is "empty" so it should be re-evaluated before
+   * the ones that have data to send. */
+  if (ctx->snd_space == 0)
     {
-      session_evt_add_old (wrk, elt);
+      session_evt_add_head_old (wrk, elt);
       return SESSION_TX_NO_DATA;
     }
@@ -898,7 +906,7 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
     {
       if (n_bufs)
         vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
-      session_evt_add_old (wrk, elt);
+      session_evt_add_head_old (wrk, elt);
       vlib_node_increment_counter (wrk->vm, node->node_index,
                                    SESSION_QUEUE_ERROR_NO_BUFFER, 1);
       return SESSION_TX_NO_BUFFERS;
@@ -1321,19 +1329,24 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   if (old_ti != wrk->old_head)
     {
+      clib_llist_index_t ei, next_ei;
       old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
+      ei = clib_llist_next_index (old_he, evt_list);
       while (n_tx_packets < VLIB_FRAME_SIZE)
         {
-          clib_llist_index_t ei;
-          clib_llist_pop_first (wrk->event_elts, evt_list, elt, old_he);
-          ei = clib_llist_entry_index (wrk->event_elts, elt);
+          elt = pool_elt_at_index (wrk->event_elts, ei);
+          next_ei = clib_llist_next_index (elt, evt_list);
+          clib_llist_remove (wrk->event_elts, evt_list, elt);
+
           session_event_dispatch_io (wrk, node, elt, thread_index,
                                      &n_tx_packets);
-          old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
+
           if (ei == old_ti)
             break;
+
+          ei = next_ei;
         };
     }
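
The hunk above also reworks how the old-events list is drained: instead of popping the first element each iteration, the loop now walks the list and snapshots next_ei before dispatching. The toy below is not VPP code (a small index-based circular list with a sentinel at index 0 stands in for clib_llist); under that simplification it sketches why the snapshot matters once sessions can requeue themselves at the head: such a session is not picked up again in the same pass, but it is first in line for the next one.

/* Toy model (not VPP code) of the reworked dispatch walk. */
#include <stdio.h>

#define N 4    /* sessions 1..N, index 0 is the list-head sentinel */

typedef struct
{
  int next[N + 1];
  int prev[N + 1];
} list_t;

static void
list_init (list_t * l)
{
  /* Circular list: 0 -> 1 -> 2 -> ... -> N -> 0 */
  for (int i = 0; i <= N; i++)
    {
      l->next[i] = (i + 1) % (N + 1);
      l->prev[(i + 1) % (N + 1)] = i;
    }
}

static void
list_remove (list_t * l, int e)
{
  l->next[l->prev[e]] = l->next[e];
  l->prev[l->next[e]] = l->prev[e];
}

static void
list_add_head (list_t * l, int e)
{
  l->next[e] = l->next[0];
  l->prev[e] = 0;
  l->prev[l->next[0]] = e;
  l->next[0] = e;
}

/* Pretend session 2 has no send space and requeues itself at the head,
 * the way session_evt_add_head_old () would. */
static void
dispatch (list_t * l, int e)
{
  printf ("dispatch %d\n", e);
  if (e == 2)
    list_add_head (l, e);
}

int
main (void)
{
  list_t l;
  int ei, next_ei, old_ti;

  list_init (&l);
  old_ti = l.prev[0];    /* last element queued before this pass */
  ei = l.next[0];
  while (1)
    {
      next_ei = l.next[ei];    /* snapshot before dispatch */
      list_remove (&l, ei);
      dispatch (&l, ei);
      if (ei == old_ti)
        break;
      ei = next_ei;
    }
  printf ("head of next pass: %d\n", l.next[0]);
  return 0;
}

With four queued sessions and session 2 constrained, each session is dispatched exactly once and session 2 ends up at the head of the next pass, which is the behavior the next_ei snapshot and the old_ti boundary appear designed to preserve.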