dpdk-ipsec: fix encrypt/decrypt single queue
When the same worker thread processes packets for both encrypt and
decrypt (i.e. a single worker with bi-directional traffic), the shared
queue results in packets awaiting decryption being dropped, because
encrypt is always handled first in each main loop iteration.

With this change, each crypto device queue is logically split into two
queues, each half the real size, avoiding the described problem.

Change-Id: Ifd3f15e316c92fbd6ca05802456b10a7f73f85da
Signed-off-by: Sergio Gonzalez Monroy <sgmonroy@gmail.com>
(cherry picked from commit d8a34a57b12200000bb42d1c55f1a99a0a473f4b)
This commit is contained in:
Sergio Gonzalez Monroy
committed by Neale Ranns
parent ad62a0e91b
commit d37b3d96c5
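
The core mechanism, reduced to a minimal standalone C sketch (QUEUE_DESC,
resource_t and enqueue_quota are illustrative names, not the plugin's actual
API): inflight ops are tracked per direction, and each direction may occupy at
most half of the hardware queue's descriptors, so a burst of encrypt
submissions can no longer exhaust the slots needed by decrypt.

#include <stdint.h>

#define QUEUE_DESC 2048		/* total descriptors in one crypto HW queue */

typedef struct
{
  /* per-direction inflight counters: [0] = decrypt, [1] = encrypt,
   * mirroring the inflights[2] array introduced by this patch */
  uint16_t inflights[2];
} resource_t;

/* How many ops may be submitted in one direction without crossing
 * that direction's half of the queue. */
static inline uint16_t
enqueue_quota (const resource_t * res, uint8_t encrypt, uint16_t n_ops)
{
  uint16_t free_slots = (QUEUE_DESC / 2) - res->inflights[encrypt];
  return n_ops < free_slots ? n_ops : free_slots;
}

With 2048 descriptors per queue, each direction therefore owns 1024 slots;
even if encrypt keeps its half fully occupied, decrypt always has 1024
descriptors available.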
@@ -27,10 +27,10 @@ format_crypto_resource (u8 * s, va_list * args)
   crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx);
 
-  s =
-    format (s, "%U thr_id %3d qp %2u inflight %u\n",
-	    format_white_space, indent, (i16) res->thread_idx,
-	    res->qp_id, res->inflights);
+  s = format (s, "%U thr_id %3d qp %2u enc_inflight %u, dec_inflights %u\n",
+	      format_white_space, indent, (i16) res->thread_idx,
+	      res->qp_id, res->inflights[0], res->inflights[1]);
 
   return s;
 }
@@ -127,25 +127,24 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
 		     vlib_node_runtime_t * node, crypto_resource_t * res)
 {
   u8 numa = rte_socket_id ();
-  u32 n_ops, n_deq;
+  u32 n_ops, total_n_deq, n_deq[2];
   u32 bis[VLIB_FRAME_SIZE], *bi;
   u16 nexts[VLIB_FRAME_SIZE], *next;
   struct rte_crypto_op **ops;
 
+  n_deq[0] = 0;
+  n_deq[1] = 0;
   bi = bis;
   next = nexts;
   ops = cwm->ops;
 
-  n_ops = n_deq = rte_cryptodev_dequeue_burst (res->dev_id,
-					       res->qp_id,
-					       ops, VLIB_FRAME_SIZE);
+  n_ops = total_n_deq = rte_cryptodev_dequeue_burst (res->dev_id,
+						     res->qp_id,
+						     ops, VLIB_FRAME_SIZE);
   /* no op dequeued, do not proceed */
-  if (n_deq == 0)
+  if (n_ops == 0)
     return 0;
 
-  res->inflights -= n_ops;
-
   while (n_ops >= 4)
     {
       struct rte_crypto_op *op0, *op1, *op2, *op3;
@@ -183,6 +182,11 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
       bi[2] = crypto_op_get_priv (op2)->bi;
       bi[3] = crypto_op_get_priv (op3)->bi;
 
+      n_deq[crypto_op_get_priv (op0)->encrypt] += 1;
+      n_deq[crypto_op_get_priv (op1)->encrypt] += 1;
+      n_deq[crypto_op_get_priv (op2)->encrypt] += 1;
+      n_deq[crypto_op_get_priv (op3)->encrypt] += 1;
+
       dpdk_crypto_input_check_op (vm, node, op0, next + 0);
       dpdk_crypto_input_check_op (vm, node, op1, next + 1);
       dpdk_crypto_input_check_op (vm, node, op2, next + 2);
@@ -208,6 +212,8 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
       next[0] = crypto_op_get_priv (op0)->next;
       bi[0] = crypto_op_get_priv (op0)->bi;
 
+      n_deq[crypto_op_get_priv (op0)->encrypt] += 1;
+
       dpdk_crypto_input_check_op (vm, node, op0, next + 0);
 
       op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
@@ -220,15 +226,18 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
     }
 
   vlib_node_increment_counter (vm, node->node_index,
-			       DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, n_deq);
+			       DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, total_n_deq);
 
-  vlib_buffer_enqueue_to_next (vm, node, bis, nexts, n_deq);
+  res->inflights[0] -= n_deq[0];
+  res->inflights[1] -= n_deq[1];
 
-  dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, n_deq);
+  vlib_buffer_enqueue_to_next (vm, node, bis, nexts, total_n_deq);
 
-  crypto_free_ops (numa, cwm->ops, n_deq);
+  dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, total_n_deq);
 
-  return n_deq;
+  crypto_free_ops (numa, cwm->ops, total_n_deq);
+
+  return total_n_deq;
 }
 
 static_always_inline uword
@@ -246,11 +255,13 @@ dpdk_crypto_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   vec_foreach (res_idx, cwm->resource_idx)
     {
       res = vec_elt_at_index (dcm->resource, res_idx[0]);
+      u32 inflights = res->inflights[0] + res->inflights[1];
 
-      if (res->inflights)
+      if (inflights)
 	n_deq += dpdk_crypto_dequeue (vm, cwm, node, res);
 
-      if (PREDICT_FALSE (res->remove && !(res->inflights)))
+      inflights = res->inflights[0] + res->inflights[1];
+      if (PREDICT_FALSE (res->remove && !(inflights)))
 	vec_add1 (remove, res_idx[0]);
     }
   /* *INDENT-ON* */
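
The dequeue side of the same bookkeeping, as a standalone sketch (op_priv_t
and settle_inflights are illustrative names, not the plugin's actual API):
each completed op carries the direction it was enqueued with, so the input
node can tally completions per direction and decrement the matching counter,
as dpdk_crypto_dequeue does above after its unrolled loop.

#include <stdint.h>

typedef struct
{
  uint8_t encrypt;		/* direction recorded when the op was enqueued */
} op_priv_t;

typedef struct
{
  uint16_t inflights[2];	/* [0] = decrypt, [1] = encrypt */
} resource_t;

static void
settle_inflights (resource_t * res, op_priv_t ** privs, uint32_t n_ops)
{
  uint32_t n_deq[2] = { 0, 0 };
  uint32_t i;

  /* tally completions per direction */
  for (i = 0; i < n_ops; i++)
    n_deq[privs[i]->encrypt] += 1;

  /* settle both counters once per burst */
  res->inflights[0] -= n_deq[0];
  res->inflights[1] -= n_deq[1];
}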
@@ -168,6 +168,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
 	  dpdk_op_priv_t *priv = crypto_op_get_priv (op);
 	  /* store bi in op private */
 	  priv->bi = bi0;
+	  priv->encrypt = 0;
 
 	  u16 op_len =
 	    sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
@@ -372,7 +373,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
 				   from_frame->n_vectors);
 
       crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index,
-			  ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
+			  ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
     }
   else
     {
@@ -381,7 +382,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
 				   from_frame->n_vectors);
 
       crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index,
-			  ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
+			  ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
     }
 
   crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);
@@ -195,6 +195,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
 	  dpdk_op_priv_t *priv = crypto_op_get_priv (op);
 	  /* store bi in op private */
 	  priv->bi = bi0;
+	  priv->encrypt = 1;
 
 	  u16 op_len =
 	    sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
@@ -564,7 +565,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
 				   from_frame->n_vectors);
 
       crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index,
-			  ESP_ENCRYPT_ERROR_ENQ_FAIL, numa);
+			  ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ );
     }
   else
     {
@@ -573,7 +574,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
 				   from_frame->n_vectors);
 
       crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index,
-			  ESP_ENCRYPT_ERROR_ENQ_FAIL, numa);
+			  ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ );
     }
 
   crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);
@@ -639,9 +639,6 @@ crypto_parse_capabilities (crypto_dev_t * dev,
     }
 }
 
-#define DPDK_CRYPTO_N_QUEUE_DESC 2048
-#define DPDK_CRYPTO_NB_SESS_OBJS 20000
-
 static clib_error_t *
 crypto_dev_conf (u8 dev, u16 n_qp, u8 numa)
 {
@@ -30,6 +30,9 @@
 #define always_inline static inline __attribute__ ((__always_inline__))
 #endif
 
+#define DPDK_CRYPTO_N_QUEUE_DESC 2048
+#define DPDK_CRYPTO_NB_SESS_OBJS 20000
+
 #define foreach_dpdk_crypto_input_next \
   _(DROP, "error-drop") \
   _(IP4_LOOKUP, "ip4-lookup") \
@@ -59,9 +62,11 @@ typedef struct
 {
   u32 next;
   u32 bi;
-  dpdk_gcm_cnt_blk cb __attribute__ ((aligned (16)));
+  u8 encrypt;
+  CLIB_ALIGN_MARK (mark0, 16);
+  dpdk_gcm_cnt_blk cb;
   u8 aad[16];
-  u8 icv[32];
+  u8 icv[32];			/* XXX last 16B in next cache line */
 } dpdk_op_priv_t;
 
 typedef struct
@@ -70,8 +75,8 @@ typedef struct
   struct rte_crypto_op **ops;
   u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
   u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
-  CLIB_CACHE_LINE_ALIGN_MARK (pad);
-} crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_worker_main_t;
 
 typedef struct
 {
@@ -115,12 +120,13 @@ typedef struct
   u8 dev_id;
   u8 numa;
   u16 qp_id;
-  u16 inflights;
+  u16 inflights[2];
   u16 n_ops;
   u16 __unused;
  struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
   u32 bi[VLIB_FRAME_SIZE];
-} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_resource_t;
 
 typedef struct
 {
@@ -130,15 +136,13 @@ typedef struct
 
 typedef struct
 {
+  CLIB_ALIGN_MARK (pad, 16);	/* align up to 16 bytes for 32bit builds */
   struct rte_cryptodev_sym_session *session;
   u64 dev_mask;
-  CLIB_ALIGN_MARK (pad, 16);	/* align up to 16 bytes for 32bit builds */
 } crypto_session_by_drv_t;
 
 typedef struct
 {
-  /* Required for vec_validate_aligned */
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   struct rte_mempool *crypto_op;
   struct rte_mempool *session_h;
   struct rte_mempool **session_drv;
@@ -149,6 +153,8 @@ typedef struct
   u64 *session_drv_failed;
   crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
   clib_spinlock_t lockp;
+  /* Required for vec_validate_aligned */
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
 } crypto_data_t;
 
 typedef struct
@@ -303,7 +309,7 @@ crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
 
 static_always_inline void
 crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
-		    u32 node_index, u32 error, u8 numa)
+		    u32 node_index, u32 error, u8 numa, u8 encrypt)
 {
   dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
   crypto_resource_t *res;
@@ -312,15 +318,18 @@ crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
   /* *INDENT-OFF* */
   vec_foreach (res_idx, cwm->resource_idx)
     {
-      u16 enq;
+      u16 enq, n_ops;
      res = vec_elt_at_index (dcm->resource, res_idx[0]);
 
       if (!res->n_ops)
	continue;
 
+      n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt];
+      n_ops = res->n_ops < n_ops ? res->n_ops : n_ops;
       enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
-					 res->ops, res->n_ops);
-      res->inflights += enq;
+					 res->ops, n_ops);
+      ASSERT (n_ops == enq);
+      res->inflights[encrypt] += enq;
 
       if (PREDICT_FALSE (enq < res->n_ops))
	{