tcp: improve pacing after idle send periods

Reset the pacer on ack reception if we haven't recently sent anything.

Type: feature

Change-Id: I820bacd81b65130052dfafbfcbe6ca4553069fbc
Signed-off-by: Florin Coras <fcoras@cisco.com>
(cherry picked from commit c31dc31f84961033ecb6354811e0c360b6cf5f79)
Florin Coras
2019-10-06 14:06:14 -07:00
committed by Andrew Yourtchenko
parent 9a114bcb28
commit abdc7dfb5f
6 changed files with 125 additions and 77 deletions

View File

@@ -843,6 +843,7 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
     {
       if (ctx->transport_vft->flush_data)
         ctx->transport_vft->flush_data (ctx->tc);
+      e->event_type = SESSION_IO_EVT_TX;
     }
 
   if (ctx->s->flags & SESSION_F_CUSTOM_TX)
@@ -881,7 +882,12 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
   session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);
 
   if (PREDICT_FALSE (!ctx->max_len_to_snd))
-    return SESSION_TX_NO_DATA;
+    {
+      transport_connection_tx_pacer_reset_bucket (ctx->tc,
+                                                  vm->clib_time.
+                                                  last_cpu_time);
+      return SESSION_TX_NO_DATA;
+    }
 
   n_bufs_needed = ctx->n_segs_per_evt * ctx->n_bufs_per_seg;
   vec_validate_aligned (wrk->tx_buffers, n_bufs_needed - 1,
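
When the dequeue params leave nothing to send, the connection is application
limited; draining the pacer bucket on the spot keeps tokens from accruing
over the idle stretch and being released later as one oversized burst.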

View File

@@ -47,10 +47,6 @@ static clib_spinlock_t local_endpoints_lock;
  */
 static double transport_pacer_period;
 
-#define TRANSPORT_PACER_MIN_MSS 1460
-#define TRANSPORT_PACER_MIN_BURST TRANSPORT_PACER_MIN_MSS
-#define TRANSPORT_PACER_MAX_BURST (43 * TRANSPORT_PACER_MIN_MSS)
-
 u8 *
 format_transport_proto (u8 * s, va_list * args)
 {
@@ -583,6 +579,13 @@ spacer_max_burst (spacer_t * pacer, u64 norm_time_now)
   u64 n_periods = norm_time_now - pacer->last_update;
   u64 inc;
 
+  if (PREDICT_FALSE (n_periods > 5e5))
+    {
+      pacer->last_update = norm_time_now;
+      pacer->bucket = TRANSPORT_PACER_MIN_BURST;
+      return TRANSPORT_PACER_MIN_BURST;
+    }
+
   if (n_periods > 0
       && (inc = (f32) n_periods * pacer->tokens_per_period) > 10)
     {
@@ -674,7 +677,7 @@ transport_connection_snd_space (transport_connection_t * tc, u64 time_now,
   u32 snd_space, max_paced_burst;
 
   snd_space = tp_vfts[tc->proto].send_space (tc);
-  if (transport_connection_is_tx_paced (tc))
+  if (snd_space && transport_connection_is_tx_paced (tc))
     {
       time_now >>= SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
       max_paced_burst = spacer_max_burst (&tc->pacer, time_now);
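
To make the bucket arithmetic concrete, below is a minimal, self-contained
sketch of the token-bucket scheme spacer_max_burst implements after this
change. The type and names are simplified stand-ins, not the actual VPP
spacer_t API, and the refill bookkeeping is condensed.

/* A minimal sketch of the token-bucket pacing above, assuming simplified
 * types; this mirrors the diff's logic, not the actual VPP spacer_t. */
#include <stdint.h>
#include <stdio.h>

#define PACER_MIN_BURST 1460           /* one min-MSS worth of tokens */
#define PACER_MAX_BURST (43 * 1460)    /* cap on accumulated tokens */

typedef struct
{
  int64_t bucket;             /* tokens (bytes) available right now */
  uint64_t last_update;       /* time of last refill, in pacer periods */
  float tokens_per_period;    /* refill rate, bytes per period */
} spacer_sketch_t;

static uint32_t
spacer_sketch_max_burst (spacer_sketch_t *pacer, uint64_t now)
{
  uint64_t n_periods = now - pacer->last_update;

  /* Long idle gap: forget accumulated tokens and restart from the
   * minimum burst instead of dumping a large backlog on the wire. */
  if (n_periods > 500000)
    {
      pacer->last_update = now;
      pacer->bucket = PACER_MIN_BURST;
      return PACER_MIN_BURST;
    }

  if (n_periods > 0)
    {
      pacer->last_update = now;
      pacer->bucket += (int64_t) (n_periods * pacer->tokens_per_period);
      if (pacer->bucket > PACER_MAX_BURST)
        pacer->bucket = PACER_MAX_BURST;
    }

  return pacer->bucket > 0 ? (uint32_t) pacer->bucket : 0;
}

int
main (void)
{
  spacer_sketch_t p = { 0, 0, 14.6f };
  /* 1000 periods at 14.6 bytes/period refills ~10 MSS of tokens */
  printf ("burst after 1000 periods: %u bytes\n",
          (unsigned) spacer_sketch_max_burst (&p, 1000));
  /* a very long gap resets to one MSS instead of the 43-MSS cap */
  printf ("burst after long idle: %u bytes\n",
          (unsigned) spacer_sketch_max_burst (&p, 1000 + 600000));
  return 0;
}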

View File

@@ -19,6 +19,10 @@
 #include <vnet/vnet.h>
 #include <vnet/session/transport_types.h>
 
+#define TRANSPORT_PACER_MIN_MSS 1460
+#define TRANSPORT_PACER_MIN_BURST TRANSPORT_PACER_MIN_MSS
+#define TRANSPORT_PACER_MAX_BURST (43 * TRANSPORT_PACER_MIN_MSS)
+
 typedef struct _transport_options_t
 {
   transport_tx_fn_type_t tx_type;
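
For scale: TRANSPORT_PACER_MIN_BURST is a single minimum-MSS segment (1460
bytes), while TRANSPORT_PACER_MAX_BURST works out to 43 * 1460 = 62,780
bytes, just under 64 KiB; presumably the cap keeps even a completely full
bucket within roughly one large (TSO-sized) send burst.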

View File

@@ -372,6 +372,7 @@ typedef struct _tcp_connection
   u32 prr_start;        /**< snd_una when prr starts */
   u32 rxt_delivered;    /**< Rxt bytes delivered during current cc event */
   u32 rxt_head;         /**< snd_una last time we re rxted the head */
+  u32 prev_dsegs_out;   /**< Number of dsegs after last ack */
   u32 tsecr_last_ack;   /**< Timestamp echoed to us in last healthy ACK */
   u32 snd_congestion;   /**< snd_una_max when congestion is detected */
   u32 tx_fifo_size;     /**< Tx fifo size. Used to constrain cwnd */

View File

@@ -574,6 +574,15 @@ tcp_estimate_initial_rtt (tcp_connection_t * tc)
   tcp_update_rto (tc);
 }
 
+always_inline u8
+tcp_recovery_no_snd_space (tcp_connection_t * tc)
+{
+  return (tcp_in_fastrecovery (tc)
+          && tcp_fastrecovery_prr_snd_space (tc) < tc->snd_mss)
+    || (tcp_in_recovery (tc)
+        && tcp_available_output_snd_space (tc) < tc->snd_mss);
+}
+
 /**
  * Dequeue bytes for connections that have received acks in last burst
  */
@@ -594,26 +603,35 @@ tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
       tc = tcp_connection_get (pending_deq_acked[i], thread_index);
       tc->flags &= ~TCP_CONN_DEQ_PENDING;
 
-      if (PREDICT_FALSE (!tc->burst_acked))
-        continue;
-
-      /* Dequeue the newly ACKed bytes */
-      session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
-      tc->burst_acked = 0;
-      tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
-
-      if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
+      if (tc->burst_acked)
         {
-          if (seq_leq (tc->psh_seq, tc->snd_una))
-            tc->flags &= ~TCP_CONN_PSH_PENDING;
+          /* Dequeue the newly ACKed bytes */
+          session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
+          tc->burst_acked = 0;
+          tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
+
+          if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
+            {
+              if (seq_leq (tc->psh_seq, tc->snd_una))
+                tc->flags &= ~TCP_CONN_PSH_PENDING;
+            }
+
+          /* If everything has been acked, stop retransmit timer
+           * otherwise update. */
+          tcp_retransmit_timer_update (tc);
+
+          /* Update pacer based on our new cwnd estimate */
+          tcp_connection_tx_pacer_update (tc);
         }
 
-      /* If everything has been acked, stop retransmit timer
-       * otherwise update. */
-      tcp_retransmit_timer_update (tc);
-
-      /* Update pacer based on our new cwnd estimate */
-      tcp_connection_tx_pacer_update (tc);
+      /* Reset the pacer if we've been idle, i.e., no data sent or if
+       * we're in recovery and snd space constrained */
+      if (tc->data_segs_out == tc->prev_dsegs_out
+          || tcp_recovery_no_snd_space (tc))
+        transport_connection_tx_pacer_reset_bucket (&tc->connection,
+                                                    wrk->vm->clib_time.
+                                                    last_cpu_time);
+
+      tc->prev_dsegs_out = tc->data_segs_out;
     }
   _vec_len (wrk->pending_deq_acked) = 0;
 }
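
The idle test itself is a counter snapshot: if no data segments went out
since the previous ack batch, the bucket is reset. A hedged sketch of that
bookkeeping, using a pared-down stand-in for tcp_connection_t (field names
follow the diff, but this is not the VPP code):

/* Sketch of the per-ack idle check, assuming a pared-down connection
 * struct; field names follow the diff, but this is not the VPP code. */
#include <stdbool.h>
#include <stdint.h>

typedef struct
{
  uint64_t data_segs_out;     /* data segments sent, running count */
  uint64_t prev_dsegs_out;    /* snapshot from the last ack batch */
  uint64_t pacer_bucket;      /* pacer tokens, in bytes */
} conn_sketch_t;

static void
on_acks_processed (conn_sketch_t *tc, bool recovery_no_snd_space)
{
  /* Nothing sent since the previous ack batch, or recovery left us with
   * no send space: treat the connection as idle and drop the tokens. */
  if (tc->data_segs_out == tc->prev_dsegs_out || recovery_no_snd_space)
    tc->pacer_bucket = 1460;  /* back to a single min-MSS burst */

  /* Re-snapshot for the next ack batch */
  tc->prev_dsegs_out = tc->data_segs_out;
}

int
main (void)
{
  conn_sketch_t tc = { 100, 100, 40000 };
  on_acks_processed (&tc, false);  /* idle: bucket drops to 1460 */
  return tc.pacer_bucket == 1460 ? 0 : 1;
}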
@@ -1335,6 +1353,8 @@ tcp_cc_recover (tcp_connection_t * tc)
   tc->rtt_ts = 0;
   tc->flags &= ~TCP_CONN_RXT_PENDING;
 
+  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
+
   /* Previous recovery left us congested. Continue sending as part
    * of the current recovery event with an updated snd_congestion */
   if (tc->sack_sb.sacked_bytes)
@@ -1354,8 +1374,6 @@ tcp_cc_recover (tcp_connection_t * tc)
   if (!tcp_in_recovery (tc) && !is_spurious)
     tcp_cc_recovered (tc);
 
-  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
-
   tcp_fastrecovery_off (tc);
   tcp_fastrecovery_first_off (tc);
   tcp_recovery_off (tc);
@@ -1589,11 +1607,10 @@ process_ack:
   if (tc->flags & TCP_CONN_RATE_SAMPLE)
     tcp_bt_sample_delivery_rate (tc, &rs);
 
+  tcp_program_dequeue (wrk, tc);
+
   if (tc->bytes_acked)
-    {
-      tcp_program_dequeue (wrk, tc);
-      tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
-    }
+    tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
 
   TCP_EVT (TCP_EVT_ACK_RCVD, tc);
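
Note that tcp_program_dequeue is now called unconditionally, so the
idle-reset check above runs for every connection that receives an ack, even
one that acked zero new bytes; only the RTT update stays gated on
bytes_acked.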

View File

@@ -409,7 +409,10 @@ tcp_update_burst_snd_vars (tcp_connection_t * tc)
   tc->flags |= TCP_CONN_TRACK_BURST;
 
   if (tc->snd_una == tc->snd_nxt)
-    tcp_cc_event (tc, TCP_CC_EVT_START_TX);
+    {
+      tcp_cc_event (tc, TCP_CC_EVT_START_TX);
+      tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_MSS);
+    }
 }
 
 #endif /* CLIB_MARCH_VARIANT */
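
Together with the TRANSPORT_PACER_MIN_MSS start bucket, a connection that
had fully drained its send queue (snd_una == snd_nxt) restarts pacing from a
single minimum-MSS burst instead of whatever tokens were left over.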
@@ -1435,19 +1438,7 @@ tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
   max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
   max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
 
-  /* Start is beyond snd_congestion */
-  start = tc->snd_una + offset;
-  if (seq_geq (start, tc->snd_congestion))
-    return 0;
-
-  /* Don't overshoot snd_congestion */
-  if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
-    {
-      max_deq_bytes = tc->snd_congestion - start;
-      if (max_deq_bytes == 0)
-        return 0;
-    }
-
   n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
   if (!n_bytes)
     return 0;
@@ -1903,17 +1894,28 @@ static int
 tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                      u32 burst_size)
 {
+  u8 snd_limited = 0, can_rescue = 0, reset_pacer = 0;
   u32 n_written = 0, offset, max_bytes, n_segs = 0;
+  u32 bi, max_deq, burst_bytes, sent_bytes;
   sack_scoreboard_hole_t *hole;
   vlib_main_t *vm = wrk->vm;
   vlib_buffer_t *b = 0;
   sack_scoreboard_t *sb;
-  u32 bi, max_deq;
   int snd_space;
-  u8 snd_limited = 0, can_rescue = 0;
+  u64 time_now;
 
   ASSERT (tcp_in_cong_recovery (tc));
 
+  time_now = wrk->vm->clib_time.last_cpu_time;
+  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+                                                     time_now);
+  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
+  if (!burst_size)
+    {
+      tcp_program_retransmit (tc);
+      return 0;
+    }
+
   if (tcp_in_recovery (tc))
     snd_space = tcp_available_cc_snd_space (tc);
   else
@@ -1921,13 +1923,12 @@ tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
 
   if (snd_space < tc->snd_mss)
     {
-      /* We're cc constrained so don't accumulate tokens */
-      transport_connection_tx_pacer_reset_bucket (&tc->connection,
-                                                  vm->
-                                                  clib_time.last_cpu_time);
-      return 0;
+      reset_pacer = burst_bytes > tc->snd_mss;
+      goto done;
     }
 
+  reset_pacer = snd_space < burst_bytes;
+
   sb = &tc->sack_sb;
 
   /* Check if snd_una is a lost retransmit */
@@ -1967,9 +1968,12 @@ tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
   /* We are out of lost holes to retransmit so send some new data. */
   if (max_deq > tc->snd_mss)
     {
-      u32 n_segs_new, av_window;
-      av_window = tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
-      snd_space = clib_min (snd_space, av_window);
+      u32 n_segs_new;
+      int av_wnd;
+
+      av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
+      av_wnd = clib_max (av_wnd, 0);
+      snd_space = clib_min (snd_space, av_wnd);
       snd_space = clib_min (max_deq, snd_space);
       burst_size = clib_min (burst_size - n_segs,
                              snd_space / tc->snd_mss);
@@ -2034,6 +2038,19 @@ tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
 done:
+
+  if (reset_pacer)
+    {
+      transport_connection_tx_pacer_reset_bucket (&tc->connection,
+                                                  vm->clib_time.
+                                                  last_cpu_time);
+    }
+  else
+    {
+      sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+      transport_connection_tx_pacer_update_bytes (&tc->connection,
+                                                  sent_bytes);
+    }
+
   return n_segs;
 }
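
Both retransmit variants now follow the same pattern: take a byte budget
from the pacer up front, convert it into a segment budget, and debit what
actually went out at the end. A rough, self-contained sketch of that flow
(the helpers are placeholders, not the VPP transport API):

/* Sketch of the pacer budget/settlement pattern; the helpers below are
 * toy placeholders, not the VPP transport API. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MIN(a, b) ((a) < (b) ? (a) : (b))

static uint32_t pacer_bucket = 30000;  /* bytes available right now */

static uint32_t
pacer_burst_bytes (void)
{
  return pacer_bucket;
}

static void
pacer_update_bytes (uint32_t sent)
{
  pacer_bucket -= SKETCH_MIN (sent, pacer_bucket);
}

static uint32_t
send_segments (uint32_t n_segs)
{
  return n_segs;                /* pretend everything was sent */
}

static uint32_t
retransmit_burst (uint32_t max_burst_segs, uint32_t snd_mss)
{
  uint32_t burst_bytes, n_segs, sent_bytes;

  /* Byte budget from the pacer, converted to a segment budget */
  burst_bytes = pacer_burst_bytes ();
  max_burst_segs = SKETCH_MIN (max_burst_segs, burst_bytes / snd_mss);
  if (!max_burst_segs)
    return 0;                   /* no tokens; reschedule and retry later */

  n_segs = send_segments (max_burst_segs);

  /* Debit what actually went out, never more than the budget */
  sent_bytes = SKETCH_MIN (n_segs * snd_mss, burst_bytes);
  pacer_update_bytes (sent_bytes);
  return n_segs;
}

int
main (void)
{
  /* 30000 bytes of tokens / 1460 B segments -> at most 20 segments */
  printf ("segments sent: %u\n", (unsigned) retransmit_burst (64, 1460));
  printf ("tokens left: %u\n", (unsigned) pacer_bucket);
  return 0;
}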
@@ -2045,14 +2062,28 @@ tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                         u32 burst_size)
 {
   u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now;
+  u32 burst_bytes, sent_bytes;
   vlib_main_t *vm = wrk->vm;
   int snd_space, n_segs = 0;
+  u8 cc_limited = 0;
   vlib_buffer_t *b;
+  u64 time_now;
 
   ASSERT (tcp_in_fastrecovery (tc));
   TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
 
+  time_now = wrk->vm->clib_time.last_cpu_time;
+  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+                                                     time_now);
+  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
+  if (!burst_size)
+    {
+      tcp_program_retransmit (tc);
+      return 0;
+    }
+
   snd_space = tcp_available_cc_snd_space (tc);
+  cc_limited = snd_space < burst_bytes;
 
   if (!tcp_fastrecovery_first (tc))
     goto send_unsent;
@@ -2098,19 +2129,12 @@ send_unsent:
 
 done:
   tcp_fastrecovery_first_off (tc);
-  return n_segs;
-}
-
-/**
- * Do fast retransmit
- */
-static int
-tcp_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, u32 burst_size)
-{
-  if (tcp_opts_sack_permitted (&tc->rcv_opts))
-    return tcp_retransmit_sack (wrk, tc, burst_size);
-  else
-    return tcp_retransmit_no_sack (wrk, tc, burst_size);
+
+  sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+  sent_bytes = cc_limited ? burst_bytes : sent_bytes;
+  transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+
+  return n_segs;
 }
 
 static int
@@ -2164,23 +2188,16 @@ tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
 static int
 tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
 {
-  u32 n_segs = 0, burst_size, sent_bytes, burst_bytes;
   tcp_worker_ctx_t *wrk;
+  u32 n_segs;
 
   wrk = tcp_get_worker (tc->c_thread_index);
-  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
-                                                     wrk->vm->
-                                                     clib_time.last_cpu_time);
-  burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss);
-  if (!burst_size)
-    {
-      tcp_program_retransmit (tc);
-      return 0;
-    }
-
-  n_segs = tcp_retransmit (wrk, tc, burst_size);
-  sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
-  transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
+    n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
+  else
+    n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);
 
   return n_segs;
 }
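
With the budget and settlement folded into tcp_retransmit_sack and
tcp_retransmit_no_sack, the old tcp_retransmit wrapper disappears and
tcp_do_retransmit reduces to the sack/no-sack dispatch; each variant can now
clamp its burst and settle with the pacer on its own.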