tcp: use sacks for timer based recovery
Type: feature

If available, reuse sack scoreboard in timer triggered retransmit to
minimize spurious retransmits.

Additional changes/refactoring:
- limited transmit updates
- add sacked rxt count to scoreboard
- prr pacing of fast retransmits
- startup pacing updates
- changed loss window to flight + mss

Change-Id: I057de6a9d6401698bd1031d5cf5cfbb62f2bdf61
Signed-off-by: Florin Coras <fcoras@cisco.com>
(cherry picked from commit 36ebcfffbc7ab0e83b4bb8dfaec16bf16cafb954)
Committed by: Andrew Yourtchenko
Parent: 45aab261a1
Commit: d4aa3d9f88
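As the message above says, the change makes the RTO path consult the SACK scoreboard instead of blindly resending from snd_una, so segments the peer already holds are not retransmitted again. The sketch below is a minimal, self-contained illustration of that idea only; the hole list and helper names are hypothetical and not the VPP implementation (the actual retransmit logic is in the large file diffs suppressed further down).

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t start, end; } hole_t;

static void
retransmit_range (uint32_t start, uint32_t end)
{
  printf ("rxt [%u, %u)\n", start, end);
}

static void
timer_retransmit_with_sacks (const hole_t *holes, int n_holes,
                             uint32_t snd_una, uint32_t mss)
{
  if (n_holes == 0)
    {
      /* No scoreboard state: classic RTO, resend the first unacked segment */
      retransmit_range (snd_una, snd_una + mss);
      return;
    }
  /* Skip everything the peer already SACKed; only the holes are resent */
  for (int i = 0; i < n_holes; i++)
    retransmit_range (holes[i].start, holes[i].end);
}

int
main (void)
{
  /* Peer SACKed [3000,6000) and [7000,8000): only the holes go out again */
  hole_t holes[] = { { 1000, 3000 }, { 6000, 7000 } };
  timer_retransmit_with_sacks (holes, 2, 1000, 1460);
  return 0;
}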
@@ -578,7 +578,8 @@ spacer_max_burst (spacer_t * pacer, u64 norm_time_now)
   u64 n_periods = norm_time_now - pacer->last_update;
   u64 inc;

-  if (n_periods > 0 && (inc = n_periods * pacer->tokens_per_period) > 10)
+  if (n_periods > 0
+      && (inc = (f32) n_periods * pacer->tokens_per_period) > 10)
     {
       pacer->last_update = norm_time_now;
       pacer->bucket = clib_min (pacer->bucket + inc, pacer->bytes_per_sec);
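The hunk above is a token-bucket refill: every elapsed pacer period credits tokens_per_period bytes to the bucket, capped at one second's worth of data. A small standalone sketch of that arithmetic follows; the toy_pacer_t type and field names are illustrative stand-ins, not the VPP spacer_t.

#include <stdint.h>

typedef struct
{
  uint64_t last_update;       /* in pacer periods */
  uint64_t bucket;            /* bytes currently allowed to burst */
  uint64_t bytes_per_sec;     /* cap: at most one second of data */
  double tokens_per_period;   /* bytes credited per elapsed period */
} toy_pacer_t;

static uint64_t
toy_pacer_refill (toy_pacer_t *p, uint64_t now_periods)
{
  uint64_t n_periods = now_periods - p->last_update;
  uint64_t inc = (uint64_t) (n_periods * p->tokens_per_period);

  /* Same shape as spacer_max_burst: only take the credit when it is
   * worth the update, then clamp the bucket to bytes_per_sec. */
  if (n_periods > 0 && inc > 10)
    {
      p->last_update = now_periods;
      p->bucket = p->bucket + inc < p->bytes_per_sec
                  ? p->bucket + inc : p->bytes_per_sec;
    }
  return p->bucket;
}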
@@ -608,6 +609,13 @@ spacer_pace_rate (spacer_t * pacer)
   return pacer->bytes_per_sec;
 }

+static inline void
+spacer_reset_bucket (spacer_t * pacer, u64 norm_time_now)
+{
+  pacer->last_update = norm_time_now;
+  pacer->bucket = 0;
+}
+
 void
 transport_connection_tx_pacer_reset (transport_connection_t * tc,
                                     u32 rate_bytes_per_sec,
@@ -646,6 +654,14 @@ transport_connection_tx_pacer_burst (transport_connection_t * tc,
   return spacer_max_burst (&tc->pacer, time_now);
 }

+void
+transport_connection_tx_pacer_reset_bucket (transport_connection_t * tc,
+                                            u64 time_now)
+{
+  time_now >>= SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
+  spacer_reset_bucket (&tc->pacer, time_now);
+}
+
 u32
 transport_connection_snd_space (transport_connection_t * tc, u64 time_now,
                                u16 mss)
@@ -219,6 +219,15 @@ u32 transport_connection_tx_pacer_burst (transport_connection_t * tc,
  */
 u64 transport_connection_tx_pacer_rate (transport_connection_t * tc);

+/**
+ * Reset tx pacer bucket
+ *
+ * @param tc          transport connection
+ * @param time_now    current cpu time
+ */
+void transport_connection_tx_pacer_reset_bucket (transport_connection_t * tc,
+                                                 u64 time_now);
+
 /**
  * Initialize period for tx pacers
  *
@@ -699,17 +699,15 @@ tcp_init_snd_vars (tcp_connection_t * tc)
   tc->snd_una = tc->iss;
   tc->snd_nxt = tc->iss + 1;
   tc->snd_una_max = tc->snd_nxt;
-  tc->srtt = 0;
+  tc->srtt = 100;	/* 100 ms */
 }

 void
 tcp_enable_pacing (tcp_connection_t * tc)
 {
-  u32 initial_bucket, byte_rate;
-  initial_bucket = 16 * tc->snd_mss;
-  byte_rate = 2 << 16;
-  transport_connection_tx_pacer_init (&tc->connection, byte_rate,
-                                      initial_bucket);
+  u32 byte_rate;
+  byte_rate = tc->cwnd / (tc->srtt * TCP_TICK);
+  transport_connection_tx_pacer_init (&tc->connection, byte_rate, tc->cwnd);
   tc->mrtt_us = (u32) ~ 0;
 }

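tcp_enable_pacing now seeds the pacer from the connection itself: a rate of one cwnd per smoothed RTT (srtt converted from ticks to seconds via TCP_TICK) and an initial bucket of one cwnd, instead of the old fixed constants. A small worked example of that startup-rate computation; TOY_TCP_TICK is an assumed tick-to-seconds factor, and the numbers are made up.

#include <stdio.h>
#include <stdint.h>

#define TOY_TCP_TICK 0.001	/* assumed tick length in seconds */

int
main (void)
{
  /* tcp_enable_pacing-style startup rate: one cwnd per smoothed RTT.
   * cwnd = 10 * 1460 bytes, srtt = 100 ticks (100 ms with 1 ms ticks). */
  uint32_t cwnd = 10 * 1460;
  uint32_t srtt_ticks = 100;
  uint32_t byte_rate = cwnd / (srtt_ticks * TOY_TCP_TICK);

  printf ("pacing rate %u bytes/sec\n", byte_rate);	/* 146000 */
  return 0;
}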
@@ -723,10 +721,11 @@ tcp_connection_init_vars (tcp_connection_t * tc)
   tcp_connection_timers_init (tc);
   tcp_init_mss (tc);
   scoreboard_init (&tc->sack_sb);
-  tcp_cc_init (tc);
   if (tc->state == TCP_STATE_SYN_RCVD)
     tcp_init_snd_vars (tc);

+  tcp_cc_init (tc);
+
   if (!tc->c_is_ip4 && ip6_address_is_link_local_unicast (&tc->c_rmt_ip6))
     tcp_add_del_adjacency (tc, 1);

@@ -919,7 +918,7 @@ format_tcp_congestion (u8 * s, va_list * args)
   s = format (s, "%U ", format_tcp_congestion_status, tc);
   s = format (s, "algo %s cwnd %u ssthresh %u bytes_acked %u\n",
              tc->cc_algo->name, tc->cwnd, tc->ssthresh, tc->bytes_acked);
-  s = format (s, "%Ucc space %u prev_cwnd %u prev_ssthresh %u rtx_bytes %u\n",
+  s = format (s, "%Ucc space %u prev_cwnd %u prev_ssthresh %u rxt_bytes %u\n",
              format_white_space, indent, tcp_available_cc_snd_space (tc),
              tc->prev_cwnd, tc->prev_ssthresh, tc->snd_rxt_bytes);
   s = format (s, "%Usnd_congestion %u dupack %u limited_transmit %u\n",
@@ -1139,8 +1138,9 @@ format_tcp_scoreboard (u8 * s, va_list * args)
   sack_scoreboard_hole_t *hole;
   u32 indent = format_get_indent (s);

-  s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n",
-             sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes);
+  s = format (s, "sacked %u last_sacked %u lost %u last_lost %u\n",
+             sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes,
+             sb->last_lost_bytes);
   s = format (s, "%Ulast_bytes_delivered %u high_sacked %u is_reneging %u\n",
              format_white_space, indent, sb->last_bytes_delivered,
              sb->high_sacked - tc->iss, sb->is_reneging);
@@ -1245,7 +1245,7 @@ tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space)
 static inline u32
 tcp_snd_space_inline (tcp_connection_t * tc)
 {
-  int snd_space, snt_limited;
+  int snd_space;

   if (PREDICT_FALSE (tcp_in_fastrecovery (tc)
                     || tc->state == TCP_STATE_CLOSED))
@@ -1253,18 +1253,21 @@ tcp_snd_space_inline (tcp_connection_t * tc)

   snd_space = tcp_available_output_snd_space (tc);

-  /* If we haven't gotten dupacks or if we did and have gotten sacked
-   * bytes then we can still send as per Limited Transmit (RFC3042) */
-  if (PREDICT_FALSE (tc->rcv_dupacks != 0
-                    && (tcp_opts_sack_permitted (tc)
-                        && tc->sack_sb.last_sacked_bytes == 0)))
+  /* If we got dupacks or sacked bytes but we're not yet in recovery, try
+   * to force the peer to send enough dupacks to start retransmitting as
+   * per Limited Transmit (RFC3042)
+   */
+  if (PREDICT_FALSE (tc->rcv_dupacks != 0 || tc->sack_sb.sacked_bytes))
     {
-      if (tc->rcv_dupacks == 1 && tc->limited_transmit != tc->snd_nxt)
+      if (tc->limited_transmit != tc->snd_nxt
+         && (seq_lt (tc->limited_transmit, tc->snd_nxt - 2 * tc->snd_mss)
+             || seq_gt (tc->limited_transmit, tc->snd_nxt)))
        tc->limited_transmit = tc->snd_nxt;

       ASSERT (seq_leq (tc->limited_transmit, tc->snd_nxt));

-      snt_limited = tc->snd_nxt - tc->limited_transmit;
-      snd_space = clib_max (2 * tc->snd_mss - snt_limited, 0);
+      int snt_limited = tc->snd_nxt - tc->limited_transmit;
+      snd_space = clib_max ((int) 2 * tc->snd_mss - snt_limited, 0);
     }
   return tcp_round_snd_space (tc, snd_space);
 }
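The limited-transmit branch above caps how much new data may be pushed before recovery starts: at most two extra segments beyond the point recorded in limited_transmit. A standalone sketch of that window computation with a worked example; the function and types here are illustrative, not the VPP helpers.

#include <stdio.h>
#include <stdint.h>

/* RFC 3042 limited transmit, as in the new tcp_snd_space_inline branch:
 * allow at most 2 * MSS of new data past the limited_transmit marker. */
static int
limited_transmit_space (uint32_t snd_nxt, uint32_t limited_transmit,
                        uint32_t snd_mss)
{
  int snt_limited = (int) (snd_nxt - limited_transmit); /* already sent */
  int space = (int) (2 * snd_mss) - snt_limited;
  return space > 0 ? space : 0;
}

int
main (void)
{
  /* One segment already sent since the first dupack: one more MSS fits */
  printf ("%d\n", limited_transmit_space (11460, 10000, 1460)); /* 1460 */
  /* Two segments already sent: no more new data until recovery starts */
  printf ("%d\n", limited_transmit_space (12920, 10000, 1460)); /* 0 */
  return 0;
}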
@@ -1358,9 +1361,9 @@ tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
                              u32 start_bucket)
 {
   tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
-  u32 byte_rate = window / ((f64) TCP_TICK * tc->srtt);
+  f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
   u64 last_time = wrk->vm->clib_time.last_cpu_time;
-  transport_connection_tx_pacer_reset (&tc->connection, byte_rate,
+  transport_connection_tx_pacer_reset (&tc->connection, window / srtt,
                                       start_bucket, last_time);
 }

@@ -32,6 +32,7 @@
 #define TCP_MAX_OPTION_SPACE 40
 #define TCP_CC_DATA_SZ 24
 #define TCP_MAX_GSO_SZ 65536
+#define TCP_RXT_MAX_BURST 10

 #define TCP_DUPACK_THRESHOLD 3
 #define TCP_IW_N_SEGMENTS 10
@@ -111,7 +112,7 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
   _(DCNT_PENDING, "Disconnect pending") \
   _(HALF_OPEN_DONE, "Half-open completed") \
   _(FINPNDG, "FIN pending") \
-  _(FRXT_PENDING, "Fast-retransmit pending") \
+  _(RXT_PENDING, "Retransmit pending") \
   _(FRXT_FIRST, "Fast-retransmit first again") \
   _(DEQ_PENDING, "Pending dequeue acked") \
   _(PSH_PENDING, "PSH pending") \
@@ -166,6 +167,7 @@ typedef struct _sack_scoreboard
   u32 sacked_bytes;		/**< Number of bytes sacked in sb */
   u32 last_sacked_bytes;	/**< Number of bytes last sacked */
   u32 last_bytes_delivered;	/**< Sack bytes delivered to app */
+  u32 rxt_sacked;		/**< Rxt last delivered */
   u32 high_sacked;		/**< Highest byte sacked (fack) */
   u32 high_rxt;			/**< Highest retransmitted sequence */
   u32 rescue_rxt;		/**< Rescue sequence number */
@@ -219,8 +221,11 @@ sack_scoreboard_hole_t *scoreboard_prev_hole (sack_scoreboard_t * sb,
                                              sack_scoreboard_hole_t * hole);
 sack_scoreboard_hole_t *scoreboard_first_hole (sack_scoreboard_t * sb);
 sack_scoreboard_hole_t *scoreboard_last_hole (sack_scoreboard_t * sb);
+
+void scoreboard_clear (sack_scoreboard_t * sb);
+void scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end);
 void scoreboard_init (sack_scoreboard_t * sb);
 void scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 snd_una);
 u8 *format_tcp_scoreboard (u8 * s, va_list * args);

 #define TCP_BTS_INVALID_INDEX ((u32)~0)
@@ -360,8 +365,10 @@ typedef struct _tcp_connection
   u32 prev_cwnd;	/**< ssthresh before congestion */
   u32 bytes_acked;	/**< Bytes acknowledged by current segment */
   u32 burst_acked;	/**< Bytes acknowledged in current burst */
-  u32 snd_rxt_bytes;	/**< Retransmitted bytes */
+  u32 snd_rxt_bytes;	/**< Retransmitted bytes during current cc event */
   u32 snd_rxt_ts;	/**< Timestamp when first packet is retransmitted */
+  u32 prr_delivered;	/**< RFC6937 bytes delivered during current event */
+  u32 rxt_delivered;	/**< Rxt bytes delivered during current cc event */
   u32 tsecr_last_ack;	/**< Timestamp echoed to us in last healthy ACK */
   u32 snd_congestion;	/**< snd_una_max when congestion is detected */
   u32 tx_fifo_size;	/**< Tx fifo size. Used to constrain cwnd */
@@ -757,7 +764,7 @@ void tcp_send_window_update_ack (tcp_connection_t * tc);

 void tcp_program_ack (tcp_connection_t * tc);
 void tcp_program_dupack (tcp_connection_t * tc);
-void tcp_program_fastretransmit (tcp_connection_t * tc);
+void tcp_program_retransmit (tcp_connection_t * tc);

 /*
  * Rate estimation
@@ -857,18 +864,9 @@ tcp_flight_size (const tcp_connection_t * tc)
   int flight_size;

   flight_size = (int) (tc->snd_nxt - tc->snd_una) - tcp_bytes_out (tc)
-    + tc->snd_rxt_bytes;
+    + tc->snd_rxt_bytes - tc->rxt_delivered;

-  if (flight_size < 0)
-    {
-      if (0)
-       clib_warning
-         ("Negative: %u %u %u dupacks %u sacked bytes %u flags %d",
-          tc->snd_una_max - tc->snd_una, tcp_bytes_out (tc),
-          tc->snd_rxt_bytes, tc->rcv_dupacks, tc->sack_sb.sacked_bytes,
-          tc->rcv_opts.flags);
-      return 0;
-    }
+  ASSERT (flight_size >= 0);

   return flight_size;
 }
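With the new rxt_delivered term, retransmitted bytes count toward the flight only while they are still unaccounted for. A small standalone illustration of the formula with made-up numbers; tcp_bytes_out() is approximated here by "sacked plus lost bytes", which is roughly what the scoreboard reports when SACK is in use.

#include <stdio.h>
#include <stdint.h>

/* Flight size as in the updated tcp_flight_size(): bytes sent but not yet
 * acked, minus bytes the scoreboard says are sacked/lost, plus bytes we
 * retransmitted that have not been delivered yet. Illustrative only. */
static int
flight_size (uint32_t snd_nxt, uint32_t snd_una, uint32_t bytes_out,
             uint32_t snd_rxt_bytes, uint32_t rxt_delivered)
{
  return (int) (snd_nxt - snd_una) - (int) bytes_out
    + (int) snd_rxt_bytes - (int) rxt_delivered;
}

int
main (void)
{
  /* 20 segments of 1000 B outstanding, 5 sacked, 2 marked lost,
   * 2 retransmitted, 1 of those already delivered:
   * 20000 - 7000 + 2000 - 1000 = 14000 bytes in flight. */
  printf ("%d\n", flight_size (20000, 0, 7000, 2000, 1000));
  return 0;
}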
@@ -912,7 +910,8 @@ tcp_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes)
 always_inline u32
 tcp_loss_wnd (const tcp_connection_t * tc)
 {
-  return tc->snd_mss;
+  /* Whatever we have in flight + the packet we're about to send */
+  return tcp_flight_size (tc) + tc->snd_mss;
 }

 always_inline u32
@@ -951,22 +950,14 @@ tcp_available_cc_snd_space (const tcp_connection_t * tc)
 always_inline u8
 tcp_is_lost_fin (tcp_connection_t * tc)
 {
-  if ((tc->flags & TCP_CONN_FINSNT) && tc->snd_una_max - tc->snd_una == 1)
+  if ((tc->flags & TCP_CONN_FINSNT) && (tc->snd_una_max - tc->snd_una == 1))
     return 1;
   return 0;
 }

 u32 tcp_snd_space (tcp_connection_t * tc);
-int tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk,
-                                 tcp_connection_t * tc);
-int tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk,
-                                tcp_connection_t * tc, u32 burst_size);
-int tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
-                             u32 burst_size);
-int tcp_fast_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
-                        u32 burst_size);
-void tcp_cc_init_congestion (tcp_connection_t * tc);
-void tcp_cc_fastrecovery_clear (tcp_connection_t * tc);
+//void tcp_cc_init_congestion (tcp_connection_t * tc);
+//void tcp_cc_fastrecovery_clear (tcp_connection_t * tc);

 fib_node_index_t tcp_lookup_rmt_in_fib (tcp_connection_t * tc);

@@ -1035,6 +1026,12 @@ tcp_cc_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
   tc->cc_algo->rcv_cong_ack (tc, ack_type, rs);
 }

+static inline void
+tcp_cc_congestion (tcp_connection_t * tc)
+{
+  tc->cc_algo->congestion (tc);
+}
+
 static inline void
 tcp_cc_loss (tcp_connection_t * tc)
 {
@@ -1068,9 +1065,10 @@ tcp_cc_get_pacing_rate (tcp_connection_t * tc)
     return tc->cc_algo->get_pacing_rate (tc);

   f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
+
   /* TODO should constrain to interface's max throughput but
    * we don't have link speeds for sw ifs ..*/
-  return (tc->cwnd / srtt);
+  return ((f64) tc->cwnd / srtt);
 }

 always_inline void
@@ -103,10 +103,7 @@ cubic_congestion (tcp_connection_t * tc)

   cd->w_max = w_max;
   tc->ssthresh = clib_max (tc->cwnd * beta_cubic, 2 * tc->snd_mss);

-  tc->cwnd = tc->ssthresh;
-  if (!tcp_opts_sack_permitted (&tc->rcv_opts))
-    tc->cwnd += 3 * tc->snd_mss;
 }

 static void
@@ -114,11 +111,10 @@ cubic_loss (tcp_connection_t * tc)
 {
   cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);

-  tc->ssthresh = clib_max (tc->cwnd * beta_cubic, 2 * tc->snd_mss);
   tc->cwnd = tcp_loss_wnd (tc);
   cd->t_start = cubic_time (tc->c_thread_index);
   cd->K = 0;
-  cd->w_max = 0;
+  cd->w_max = tc->cwnd / tc->snd_mss;
 }

 static void
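For reference, CUBIC's beta is 0.7 (RFC 8312), so the ssthresh line in cubic_congestion above is a 30% reduction floored at two segments, and cubic_loss now re-seeds w_max from the post-loss cwnd expressed in segments. A quick worked illustration of the ssthresh arithmetic with made-up numbers:

#include <stdio.h>

int
main (void)
{
  /* cubic_congestion-style ssthresh: max (cwnd * 0.7, 2 * mss) */
  unsigned int cwnd = 100000, mss = 1460;
  unsigned int ssthresh = cwnd * 0.7 > 2.0 * mss ? cwnd * 0.7 : 2 * mss;
  printf ("ssthresh %u\n", ssthresh);	/* 70000 */
  return 0;
}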
@@ -159,7 +155,7 @@ cubic_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)

   if (tcp_in_slowstart (tc))
     {
-      tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
+      tc->cwnd += tc->bytes_acked;
       return;
     }

[File diff suppressed because it is too large]
@@ -20,17 +20,11 @@ newreno_congestion (tcp_connection_t * tc)
 {
   tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
-  tc->cwnd = tc->ssthresh;
-  /* Post retransmit update cwnd to ssthresh and account for the
-   * three segments that have left the network and should've been
-   * buffered at the receiver XXX */
-  if (!tcp_opts_sack_permitted (&tc->rcv_opts))
-    tc->cwnd += 3 * tc->snd_mss;
 }

 static void
 newreno_loss (tcp_connection_t * tc)
 {
   tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
   tc->cwnd = tcp_loss_wnd (tc);
 }

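NewReno's entry into congestion now only records ssthresh (half the flight size, floored at two segments per RFC 5681); the cwnd adjustment during recovery is presumably handled by the PRR pacing mentioned in the commit message rather than here. A quick worked example of the ssthresh computation with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* newreno_congestion-style ssthresh: half the flight size, but never
 * below two segments (RFC 5681). Illustrative numbers only. */
static uint32_t
newreno_ssthresh (uint32_t flight_size, uint32_t snd_mss)
{
  uint32_t half = flight_size / 2;
  return half > 2 * snd_mss ? half : 2 * snd_mss;
}

int
main (void)
{
  printf ("%u\n", newreno_ssthresh (100000, 1460));	/* 50000 */
  printf ("%u\n", newreno_ssthresh (4000, 1460));	/* 2920: floor wins */
  return 0;
}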
[File diff suppressed because it is too large]
@@ -143,9 +143,8 @@ typedef struct _sack_block
 typedef struct
 {
   u8 flags;		/** Option flags, see above */
-
-  u16 mss;		/**< Maximum segment size advertised */
   u8 wscale;		/**< Window scale advertised */
+  u16 mss;		/**< Maximum segment size advertised */
   u32 tsval;		/**< Timestamp value */
   u32 tsecr;		/**< Echoed/reflected time stamp */
   sack_block_t *sacks;	/**< SACK blocks */