tcp: fix fin_wait_1 condition to send fin

Also add the closed-waiting session state, wherein the session still
allows the transport to send outstanding data.
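
For illustration, a minimal standalone sketch of the state choice this
introduces. All names below are simplified stand-ins for this note, not
the real session-layer API:

#include <stdio.h>

/* Simplified stand-ins for the two session states involved. */
typedef enum
{
  STATE_CLOSED_WAITING,		/* transport may still drain the tx fifo */
  STATE_CLOSED,			/* transport may no longer send */
} toy_state_t;

/* Pick the close state the way the session layer now does: park the
 * session in closed-waiting while tx data is still queued. */
static toy_state_t
pick_close_state (unsigned tx_bytes_queued)
{
  return tx_bytes_queued ? STATE_CLOSED_WAITING : STATE_CLOSED;
}

int
main (void)
{
  printf ("%d\n", pick_close_state (0));	/* 1: CLOSED */
  printf ("%d\n", pick_close_state (2048));	/* 0: CLOSED_WAITING */
  return 0;
}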

Change-Id: Ic47807379906ef2010934381ff0b9e53c7e631d8
Signed-off-by: Florin Coras <fcoras@cisco.com>
Authored by Florin Coras on 2018-12-20 18:24:49 -08:00
Committed by Dave Barach
Parent 00a469d969, commit 78cc4b0797
4 changed files with 20 additions and 3 deletions


@@ -819,6 +819,7 @@ stream_session_delete_notify (transport_connection_t * tc)
       break;
     case SESSION_STATE_CLOSED:
     case SESSION_STATE_ACCEPTING:
+    case SESSION_STATE_CLOSED_WAITING:
       stream_session_delete (s);
       break;
     default:
@@ -1112,7 +1113,18 @@ stream_session_disconnect_transport (stream_session_t * s)
       session_free_w_fifos (s);
       return;
     }
-  s->session_state = SESSION_STATE_CLOSED;
+  /* If tx queue wasn't drained, change state to closed waiting for transport.
+   * This way, the transport, if it so wishes, can continue to try sending the
+   * outstanding data (in closed state it cannot). It MUST however at one
+   * point, either after sending everything or after a timeout, call delete
+   * notify. This will finally lead to the complete cleanup of the session.
+   */
+  if (svm_fifo_max_dequeue (s->server_tx_fifo))
+    s->session_state = SESSION_STATE_CLOSED_WAITING;
+  else
+    s->session_state = SESSION_STATE_CLOSED;
   tp_vfts[session_get_transport_proto (s)].close (s->connection_index,
                                                   s->thread_index);
 }
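
As hedged context for the comment above, a standalone sketch of the
transport's side of the contract; the drain loop and the delete_notify
stand-in are illustrative names for this note, not VPP functions.
Whether the transport drains everything or gives up after its timeout,
it must finish with a delete notify, which the first hunk above now
honors for CLOSED_WAITING sessions:

typedef struct
{
  unsigned tx_pending;		/* bytes left in the tx fifo */
  unsigned retries_left;	/* give-up budget, i.e. the timeout */
} toy_transport_t;

/* Stand-in for the session layer's delete notify: for sessions in
 * CLOSED or CLOSED_WAITING this triggers the final cleanup. */
static void
delete_notify (toy_transport_t * t)
{
  t->tx_pending = 0;		/* session resources reclaimed here */
}

static void
close_with_pending_data (toy_transport_t * t)
{
  /* Keep trying to send while in CLOSED_WAITING ... */
  while (t->tx_pending && t->retries_left--)
    t->tx_pending -= t->tx_pending > 1460 ? 1460 : t->tx_pending;

  /* ... but always finish with delete notify, drained or not. */
  delete_notify (t);
}

int
main (void)
{
  toy_transport_t t = { 4000, 2 };	/* 4000 B queued, 2 tries left */
  close_with_pending_data (&t);
  return 0;
}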


@@ -33,6 +33,7 @@ typedef enum
   SESSION_STATE_OPENED,
   SESSION_STATE_TRANSPORT_CLOSING,
   SESSION_STATE_CLOSING,
+  SESSION_STATE_CLOSED_WAITING,
   SESSION_STATE_CLOSED,
   SESSION_STATE_N_STATES,
 } stream_session_state_t;


@@ -1271,6 +1271,9 @@ tcp_timer_waitclose_handler (u32 conn_index)
   else if (tc->state == TCP_STATE_FIN_WAIT_1)
     {
       tcp_connection_timers_reset (tc);
+      /* If FIN pending send it before closing */
+      if (tc->flags & TCP_CONN_FINPNDG)
+        tcp_send_fin (tc);
       tc->state = TCP_STATE_CLOSED;
       /* Wait for session layer to clean up tx events */
       tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
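
This is the fix named in the subject line: previously a waitclose
timeout in FIN_WAIT_1 moved the connection to CLOSED without ever
emitting the FIN still pending behind undrained tx data; the FIN is now
sent before the connection is torn down.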


@@ -2645,7 +2645,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 {
   u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
   tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
-  u32 n_left_from, *from;
+  u32 n_left_from, *from, max_dequeue;

   from = first_buffer = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -2776,7 +2776,8 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
           if (tc0->flags & TCP_CONN_FINPNDG)
             {
               /* TX fifo finally drained */
-              if (!session_tx_fifo_max_dequeue (&tc0->connection))
+              max_dequeue = session_tx_fifo_max_dequeue (&tc0->connection);
+              if (max_dequeue <= tc0->burst_acked)
                 tcp_send_fin (tc0);
             }
           /* If FIN is ACKed */
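
A hedged reading of this condition change: burst_acked appears to
accumulate the bytes ACKed during the current dispatch burst, and those
bytes may not yet have been dequeued from the tx fifo, so the old
!max_dequeue test could keep missing the moment the fifo is effectively
drained and never send the FIN. A toy check with made-up values:

#include <assert.h>

int
main (void)
{
  unsigned max_dequeue = 1460;	/* bytes still readable from the tx fifo */
  unsigned burst_acked = 1460;	/* bytes ACKed within this burst (assumed) */

  /* Everything left in the fifo is covered by this burst's ACKs, so the
   * fifo is effectively drained even though it is not yet empty. */
  int old_fires = (max_dequeue == 0);		/* 0: FIN never sent */
  int new_fires = (max_dequeue <= burst_acked);	/* 1: FIN sent now */
  assert (!old_fires && new_fires);
  return 0;
}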