hsa: Improve hoststack apps for mq-intensive workloads

Type: feature

* Remove the retry mechanism in case of failed connect
* Limit the number of simultaneous connects (defaults
to mq-size / 2)

Change-Id: I7a0ed2e349ee3e8ca71639c2a2ec4cdf84f5c13e
Signed-off-by: Nathan Skrzypczak <nathan.skrzypczak@gmail.com>
This commit is contained in:
Nathan Skrzypczak
2019-11-22 11:26:19 +01:00
committed by Dave Wallace
parent 952a7b8b74
commit b2fce36c8e
5 changed files with 31 additions and 46 deletions
+17 -21
View File
@@ -587,6 +587,8 @@ session_connected_handler (session_connected_msg_t * mp)
u32 listener_index = htonl (mp->context);
svm_fifo_t *rx_fifo, *tx_fifo;
clib_atomic_add_fetch (&em->max_sim_connects, 1);
if (mp->retval)
{
if (em->proto_cb_vft->connected_cb)
@@ -738,23 +740,6 @@ echo_process_rpcs (echo_main_t * em)
}
}
/* Drain up to n_max_msg pending messages from mq into msg_vec while the
 * queue lock is held by the caller.  Returns the number of messages
 * dequeued (bounded by the current queue depth).
 *
 * NOTE(review): vec_add2 may reallocate the vector, but msg_vec is passed
 * by value, so the caller's copy of the vector pointer is never updated —
 * the dequeued messages can be lost from the caller's view.  TODO confirm
 * callers pre-size the vector or use the inline-loop form instead. */
static inline int
echo_mq_dequeue_batch (svm_msg_q_t * mq, svm_msg_q_msg_t * msg_vec,
		       u32 n_max_msg)
{
  u32 n_to_dequeue = clib_min (svm_msg_q_size (mq), n_max_msg);
  u32 n_done = 0;

  while (n_done < n_to_dequeue)
    {
      svm_msg_q_msg_t *msg;
      vec_add2 (msg_vec, msg, 1);
      svm_msg_q_sub_w_lock (mq, msg);
      n_done++;
    }
  return n_done;
}
static void *
echo_mq_thread_fn (void *arg)
{
@@ -784,7 +769,11 @@ echo_mq_thread_fn (void *arg)
svm_msg_q_unlock (mq);
continue;
}
echo_mq_dequeue_batch (mq, msg_vec, ~0);
for (i = 0; i < svm_msg_q_size (mq); i++)
{
vec_add2 (msg_vec, msg, 1);
svm_msg_q_sub_w_lock (mq, msg);
}
svm_msg_q_unlock (mq);
for (i = 0; i < vec_len (msg_vec); i++)
@@ -839,13 +828,14 @@ print_usage_and_exit (void)
int i;
fprintf (stderr,
"Usage: vpp_echo [socket-name SOCKET] [client|server] [uri URI] [OPTIONS]\n"
"Generates traffic and assert correct teardown of the QUIC hoststack\n"
"Generates traffic and assert correct teardown of the hoststack\n"
"\n"
" socket-name PATH Specify the binary socket path to connect to VPP\n"
" use-svm-api Use SVM API to connect to VPP\n"
" test-bytes[:assert] Check data correctness when receiving (assert fails on first error)\n"
" fifo-size N[K|M|G] Use N[K|M|G] fifos\n"
" mq-size N Use N event slots for vpp_echo <-> vpp events\n"
" mq-size N Use mq with N slots for [vpp_echo->vpp] communication\n"
" max-sim-connects N Do not allow more than N mq events inflight\n"
" rx-buf N[K|M|G] Use N[Kb|Mb|GB] RX buffer\n"
" tx-buf N[K|M|G] Use N[Kb|Mb|GB] TX test buffer\n"
" appns NAMESPACE Use the namespace NAMESPACE\n"
@@ -1008,6 +998,8 @@ echo_process_opts (int argc, char **argv)
echo_unformat_timing_event, &em->timing.start_event,
echo_unformat_timing_event, &em->timing.end_event))
;
else if (unformat (a, "max-sim-connects %d", &em->max_sim_connects))
;
else
print_usage_and_exit ();
}
@@ -1040,6 +1032,9 @@ echo_process_opts (int argc, char **argv)
if (em->send_stream_disconnects == ECHO_CLOSE_F_INVALID)
em->send_stream_disconnects = default_f_active;
if (em->max_sim_connects == 0)
em->max_sim_connects = em->evt_q_size >> 1;
if (em->wait_for_gdb)
{
volatile u64 nop = 0;
@@ -1091,7 +1086,7 @@ main (int argc, char **argv)
char *app_name;
u64 i;
svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
u32 rpc_queue_size = 64 << 10;
u32 rpc_queue_size = 256 << 10;
em->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
clib_spinlock_init (&em->sid_vpp_handles_lock);
@@ -1118,6 +1113,7 @@ main (int argc, char **argv)
em->tx_buf_size = 1 << 20;
em->data_source = ECHO_INVALID_DATA_SOURCE;
em->uri = format (0, "%s%c", "tcp://0.0.0.0/1234", 0);
em->max_sim_connects = 0;
em->crypto_engine = CRYPTO_ENGINE_NONE;
echo_set_each_proto_defaults_before_opts (em);
echo_process_opts (argc, argv);
+4
View File
@@ -149,6 +149,10 @@ echo_send_connect (u64 parent_session_handle, u32 opaque)
session_connect_msg_t *mp;
svm_msg_q_t *mq = em->ctrl_mq;
clib_atomic_sub_fetch (&em->max_sim_connects, 1);
while (em->max_sim_connects <= 0)
;
app_alloc_ctrl_evt_to_vpp (mq, app_evt, SESSION_CTRL_EVT_CONNECT);
mp = (session_connect_msg_t *) app_evt->evt->data;
memset (mp, 0, sizeof (*mp));
+2 -2
View File
@@ -530,13 +530,13 @@ echo_send_rpc (echo_main_t * em, void *fp, void *arg, u32 opaque)
echo_rpc_msg_t *evt;
if (PREDICT_FALSE (svm_msg_q_lock (em->rpc_msq_queue)))
{
ECHO_LOG (1, "RPC lock failed");
ECHO_FAIL (ECHO_FAIL_RPC_SIZE, "RPC lock failed");
return -1;
}
if (PREDICT_FALSE (svm_msg_q_ring_is_full (em->rpc_msq_queue, 0)))
{
svm_msg_q_unlock (em->rpc_msq_queue);
ECHO_LOG (1, "RPC ring is full");
ECHO_FAIL (ECHO_FAIL_RPC_SIZE, "RPC ring is full");
return -2;
}
msg = svm_msg_q_alloc_msg_w_ring (em->rpc_msq_queue, 0);
+4 -1
View File
@@ -95,7 +95,8 @@
_(ECHO_FAIL_TEST_ASSERT_RX_TOTAL, "ECHO_FAIL_TEST_ASSERT_RX_TOTAL") \
_(ECHO_FAIL_TEST_ASSERT_TX_TOTAL, "ECHO_FAIL_TEST_ASSERT_TX_TOTAL") \
_(ECHO_FAIL_TEST_ASSERT_ALL_SESSIONS_CLOSED, \
"ECHO_FAIL_TEST_ASSERT_ALL_SESSIONS_CLOSED")
"ECHO_FAIL_TEST_ASSERT_ALL_SESSIONS_CLOSED") \
_(ECHO_FAIL_RPC_SIZE, "ECHO_FAIL_RPC_SIZE")
typedef enum
{
@@ -328,6 +329,8 @@ typedef struct
volatile u32 n_clients_connected; /* Number of STREAM sessions connected */
volatile u32 nxt_available_sidx; /* next unused prealloced session_index */
volatile int max_sim_connects;
/* VNET_API_ERROR_FOO -> "Foo" hash table */
uword *error_string_by_error_number;
echo_proto_cb_vft_t *available_proto_cb_vft[TRANSPORT_N_PROTO];
+4 -22
View File
@@ -298,33 +298,15 @@ quic_echo_on_connected (session_connected_msg_t * mp, u32 session_index)
}
}
/* Re-issue a failed connect via the RPC thread.  A valid session_index
 * retries the stream connect for that session; SESSION_INVALID_INDEX
 * retries the parent Qsession connect instead. */
static void
quic_echo_retry_connect (u32 session_index)
{
  echo_main_t *em = &echo_main;
  echo_session_t *s;

  if (session_index != SESSION_INVALID_INDEX)
    {
      /* Stream-level retry: reuse the parent's vpp session handle. */
      s = pool_elt_at_index (em->sessions, session_index);
      ECHO_LOG (1, "Retrying connect %U", echo_format_session, s);
      echo_send_rpc (em, echo_send_connect, (void *) s->vpp_session_handle,
		     session_index);
      return;
    }

  /* Qsession-level retry: no parent handle to pass along. */
  ECHO_LOG (1, "Retrying Qsession connect");
  echo_send_rpc (em, echo_send_connect, (void *) SESSION_INVALID_HANDLE,
		 SESSION_INVALID_INDEX);
}
/* Connected callback for the QUIC protocol vft.  On failure, record a hard
 * test failure (the retry mechanism was removed by this change); otherwise
 * hand the message off to the normal connected-session path.
 *
 * Fix: the merged text executed the ECHO_FAIL block unconditionally (the
 * old `return quic_echo_retry_connect (...)` line preceded it, leaving the
 * block a free-standing statement on the success path).  The failure
 * handling now runs only when is_failed is set. */
static void
quic_echo_connected_cb (session_connected_bundled_msg_t * mp,
			u32 session_index, u8 is_failed)
{
  if (is_failed)
    {
      ECHO_FAIL (ECHO_FAIL_QUIC_WRONG_CONNECT, "Echo connect failed");
      return;
    }
  return quic_echo_on_connected ((session_connected_msg_t *) mp,
				 session_index);
}