dpdk/ipsec: multiple fixes

- fix ESP transport mode
- safely free crypto sessions
- use rte_mempool_virt2phy/rte_mempool_virt2iova
- align DPDK QAT capabilities for IPsec usage (DPDK 17.08)
- reserve 16B for the AAD buffer, as required by the DPDK cryptodev API documentation

Change-Id: I3822a7456fb5a255c767f5a44a429f91a140fe64
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
This commit is contained in:
Sergio Gonzalez Monroy
2017-11-26 15:25:43 +00:00
committed by Damjan Marion
parent 3a699b28bb
commit 99214ce0ae
6 changed files with 227 additions and 105 deletions
@@ -0,0 +1,101 @@
From f8184af94214f1c76c0ffda45b9de9243aea287c Mon Sep 17 00:00:00 2001
From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Date: Tue, 17 Oct 2017 20:05:59 +0100
Subject: [PATCH] crypto/qat: align capabilities
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
---
drivers/crypto/qat/qat_crypto_capabilities.h | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index d8d3fa1..00f8056 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -48,9 +48,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 20, \
+ .min = 12, \
.max = 20, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -69,9 +69,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 28, \
+ .min = 14, \
.max = 28, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -90,9 +90,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 32, \
+ .min = 16, \
.max = 32, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -111,9 +111,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 48, \
+ .min = 24, \
.max = 48, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -132,9 +132,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 64, \
+ .min = 32, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -153,9 +153,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 16, \
+ .min = 12, \
.max = 16, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -174,9 +174,9 @@
.increment = 0 \
}, \
.digest_size = { \
- .min = 16, \
+ .min = 12, \
.max = 16, \
- .increment = 0 \
+ .increment = 1 \
}, \
.aad_size = { 0 }, \
.iv_size = { 0 } \
--
2.9.5
-2
View File
@@ -230,8 +230,6 @@ dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vec_free (remove);
}
/* TODO Clear all sessions in device */
return n_deq;
}
+14 -25
View File
@@ -264,7 +264,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
}
u32 cipher_off, cipher_len;
u32 auth_len = 0, aad_size = 0;
u32 auth_len = 0;
u8 *aad = NULL;
u8 *iv = (u8 *) (esp0 + 1);
@@ -285,20 +285,19 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
u32 *_iv = (u32 *) iv;
crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]);
iv_size = 12;
}
if (is_aead)
{
aad = priv->aad;
clib_memcpy(aad, esp0, 8);
u32 * _aad = (u32 *) aad;
clib_memcpy (aad, esp0, 8);
/* _aad[3] should always be 0 */
if (PREDICT_FALSE (sa0->use_esn))
{
*((u32*)&aad[8]) = sa0->seq_hi;
aad_size = 12;
}
_aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
else
aad_size = 8;
_aad[2] = 0;
}
else
{
@@ -307,7 +306,8 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
if (sa0->use_esn)
{
clib_memcpy (priv->icv, digest, trunc_size);
*((u32*) digest) = sa0->seq_hi;
u32 *_digest = (u32 *) digest;
_digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
auth_len += sizeof(sa0->seq_hi);
digest = priv->icv;
@@ -316,10 +316,8 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm,
}
}
crypto_op_setup (is_aead, mb0, op, session,
cipher_off, cipher_len, (u8 *) icb, iv_size,
0, auth_len, aad, aad_size,
digest, digest_paddr, trunc_size);
crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len,
0, auth_len, aad, digest, digest_paddr);
trace:
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -522,22 +520,13 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm,
memmove(oh4, ih4, ih4_len);
next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
u16 old_ttl_prot =
((u16) oh4->ttl) << 8 | (u16) oh4->protocol;
u16 new_ttl_prot =
((u16) oh4->ttl) << 8 | (u16) f0->next_header;
oh4->protocol = f0->next_header;
u16 new_len = clib_host_to_net_u16 (b0->current_length);
oh4->length = new_len;
/* rfc1264 incremental checksum update */
oh4->checksum = ~(~oh4->checksum + ~oh4->length + new_len +
~old_ttl_prot + new_ttl_prot);
oh4->length = clib_host_to_net_u16 (b0->current_length);
oh4->checksum = ip4_header_checksum(oh4);
}
else if ((ih4->ip_version_and_header_length & 0xF0) == 0x60)
{
/* FIXME find ip header */
ih6 = (ip6_header_t *) (b0->data + sizeof(ethernet_header_t));
ih6 = (ip6_header_t *) ih4;
vlib_buffer_advance (b0, -sizeof(ip6_header_t));
oh6 = vlib_buffer_get_current (b0);
memmove(oh6, ih6, sizeof(ip6_header_t));
+17 -22
View File
@@ -346,10 +346,10 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT;
u16 rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length;
u16 adv = sizeof (esp_header_t) + iv_size;
vlib_buffer_advance (b0, -rewrite_len - adv);
vlib_buffer_advance (b0, -adv - rewrite_len);
u8 *src = ((u8 *) ih0) - rewrite_len;
u8 *dst = vlib_buffer_get_current (b0);
oh0 = (ip4_and_esp_header_t *) (dst + rewrite_len);
oh0 = vlib_buffer_get_current (b0) + rewrite_len;
if (is_ipv6)
{
@@ -363,13 +363,12 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
}
else /* ipv4 */
{
orig_sz -= ip4_header_bytes (&ih0->ip4);
u16 ip_size = ip4_header_bytes (&ih0->ip4);
orig_sz -= ip_size;
next_hdr_type = ih0->ip4.protocol;
memmove (dst, src,
rewrite_len + ip4_header_bytes (&ih0->ip4));
memmove (dst, src, rewrite_len + ip_size);
oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
esp0 =
(esp_header_t *) (oh0 + ip4_header_bytes (&ih0->ip4));
esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size);
}
esp0->spi = clib_host_to_net_u32 (sa0->spi);
esp0->seq = clib_host_to_net_u32 (sa0->seq);
@@ -383,6 +382,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
u8 *padding =
vlib_buffer_put_uninit (b0, pad_bytes + 2 + trunc_size);
/* The extra pad bytes would be overwritten by the digest */
if (pad_bytes)
clib_memcpy (padding, pad_data, 16);
@@ -410,9 +410,9 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
mb0->pkt_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0);
mb0->data_off = ((void *) esp0) - mb0->buf_addr;
u32 cipher_off, cipher_len;
u32 auth_len = 0, aad_size = 0;
u32 cipher_off, cipher_len, auth_len = 0;
u32 *aad = NULL;
u8 *digest = vlib_buffer_get_tail (b0) - trunc_size;
u64 digest_paddr =
mb0->buf_physaddr + digest - ((u8 *) mb0->buf_addr);
@@ -430,8 +430,6 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
cipher_off = sizeof (esp_header_t) + iv_size;
cipher_len = pad_payload_len;
iv_size = 12; /* CTR/GCM IV size, not ESP IV size */
}
if (is_aead)
@@ -440,13 +438,11 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
aad[0] = clib_host_to_net_u32 (sa0->spi);
aad[1] = clib_host_to_net_u32 (sa0->seq);
if (sa0->use_esn)
{
aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
aad_size = 12;
}
/* aad[3] should always be 0 */
if (PREDICT_FALSE (sa0->use_esn))
aad[2] = clib_host_to_net_u32 (sa0->seq_hi);
else
aad_size = 8;
aad[2] = 0;
}
else
{
@@ -454,15 +450,14 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm,
vlib_buffer_get_tail (b0) - ((u8 *) esp0) - trunc_size;
if (sa0->use_esn)
{
*((u32 *) digest) = sa0->seq_hi;
u32 *_digest = (u32 *) digest;
_digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
auth_len += 4;
}
}
crypto_op_setup (is_aead, mb0, op, session,
cipher_off, cipher_len, (u8 *) icb, iv_size,
0, auth_len, (u8 *) aad, aad_size,
digest, digest_paddr, trunc_size);
crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len,
0, auth_len, (u8 *) aad, digest, digest_paddr);
trace:
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+76 -46
View File
@@ -328,11 +328,14 @@ create_sym_session (struct rte_cryptodev_sym_session **session,
struct rte_crypto_sym_xform cipher_xform = { 0 };
struct rte_crypto_sym_xform auth_xform = { 0 };
struct rte_crypto_sym_xform *xfs;
struct rte_cryptodev_sym_session **s;
crypto_session_key_t key = { 0 };
key.drv_id = res->drv_id;
key.sa_idx = sa_idx;
data = vec_elt_at_index (dcm->data, res->numa);
sa = pool_elt_at_index (im->sad, sa_idx);
if ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) |
@@ -359,16 +362,14 @@ create_sym_session (struct rte_cryptodev_sym_session **session,
}
}
data = vec_elt_at_index (dcm->data, res->numa);
/*
* DPDK_VER >= 1708:
* Multiple worker/threads share the session for an SA
* Single session per SA, initialized for each device driver
*/
session[0] = (void *) hash_get (data->session_by_sa_index, sa_idx);
s = (void *) hash_get (data->session_by_sa_index, sa_idx);
if (!session[0])
if (!s)
{
session[0] = rte_cryptodev_sym_session_create (data->session_h);
if (!session[0])
@@ -378,6 +379,8 @@ create_sym_session (struct rte_cryptodev_sym_session **session,
}
hash_set (data->session_by_sa_index, sa_idx, session[0]);
}
else
session[0] = s[0];
struct rte_mempool **mp;
mp = vec_elt_at_index (data->session_drv, res->drv_id);
@@ -392,7 +395,7 @@ create_sym_session (struct rte_cryptodev_sym_session **session,
res->drv_id);
}
hash_set (cwm->session_by_drv_id_and_sa_index, key.val, session[0]);
hash_set (data->session_by_drv_id_and_sa_index, key.val, session[0]);
return 0;
}
@@ -422,19 +425,59 @@ set_session_private_data (struct rte_cryptodev_sym_session *sess,
sess->sess_private_data[driver_id] = private_data;
}
/*
 * Deferred disposal of cryptodev sessions.
 *
 * Walk the disposal vector v (ordered by timestamp, oldest first) and
 * free every session whose entry has aged past dcm->session_timeout
 * relative to the current time ts.  For each aged entry the per-driver
 * private session data is cleared and freed first (see the in-line note
 * on why rte_cryptodev_sym_session_clear is not used), then the session
 * itself is released.  Processed entries are removed from the head of
 * the vector.
 *
 * @param v   vector of pending disposals, ordered by ts (oldest first)
 * @param ts  current time, nsec — same clock as s->ts (unix_time_now_nsec)
 * @return    always 0 (clib_error_t * convention; no error paths)
 */
static clib_error_t *
dpdk_crypto_session_disposal (crypto_session_disposal_t * v, u64 ts)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_session_disposal_t *s;
  void *drv_session;
  u32 drv_id;
  i32 ret;

  /* *INDENT-OFF* */
  vec_foreach (s, v)
    {
      /* vector is ordered by timestamp: stop at the first entry that is
       * still inside its grace period */
      if (!(s->ts + dcm->session_timeout < ts))
	break;

      vec_foreach_index (drv_id, dcm->drv)
	{
	  drv_session = get_session_private_data (s->session, drv_id);
	  if (!drv_session)
	    continue;

	  /*
	   * Custom clear to avoid finding a dev_id for drv_id:
	   * ret = rte_cryptodev_sym_session_clear (dev_id, drv_session);
	   * ASSERT (!ret);
	   */
	  clear_and_free_obj (drv_session);

	  set_session_private_data (s->session, drv_id, NULL);
	}

      ret = rte_cryptodev_sym_session_free (s->session);
      ASSERT (!ret);
    }
  /* *INDENT-ON* */

  /* Remove the processed (freed) prefix.  BUG FIX: when ALL entries had
   * aged out, s == vec_end (v) and the former `if (s < vec_end (v))`
   * guard skipped the delete, leaving freed sessions in the vector and
   * causing a double free on the next call.  Delete whenever anything
   * was processed. */
  if (s > v)
    vec_delete (v, s - v, 0);

  return 0;
}
static clib_error_t *
add_del_sa_session (u32 sa_index, u8 is_add)
{
ipsec_main_t *im = &ipsec_main;
dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
crypto_worker_main_t *cwm;
crypto_data_t *data;
struct rte_cryptodev_sym_session *s;
crypto_session_key_t key = { 0 };
uword *val;
u32 drv_id;
i32 ret;
key.sa_idx = sa_index;
if (is_add)
{
@@ -456,28 +499,8 @@ add_del_sa_session (u32 sa_index, u8 is_add)
return 0;
}
/* XXX Wait N cycles to be sure session is not in use OR
* keep refcnt at SA level per worker/thread ? */
unix_sleep (0.2);
key.sa_idx = sa_index;
/* *INDENT-OFF* */
vec_foreach (cwm, dcm->workers_main)
{
for (drv_id = 0; drv_id < dcm->max_drv_id; drv_id++)
{
key.drv_id = drv_id;
val = hash_get (cwm->session_by_drv_id_and_sa_index, key.val);
s = (struct rte_cryptodev_sym_session *) val;
if (!s)
continue;
hash_unset (cwm->session_by_drv_id_and_sa_index, key.val);
}
}
/* *INDENT-ON* */
crypto_data_t *data;
/* *INDENT-OFF* */
vec_foreach (data, dcm->data)
{
@@ -487,27 +510,28 @@ add_del_sa_session (u32 sa_index, u8 is_add)
if (!s)
continue;
hash_unset (data->session_by_sa_index, sa_index);
void *drv_session;
vec_foreach_index (drv_id, dcm->drv)
{
drv_session = get_session_private_data (s, drv_id);
if (!drv_session)
key.drv_id = drv_id;
val = hash_get (data->session_by_drv_id_and_sa_index, key.val);
s = (struct rte_cryptodev_sym_session *) val;
if (!s)
continue;
/*
* Custom clear to avoid finding a dev_id for drv_id:
* ret = rte_cryptodev_sym_session_clear (dev_id, drv_session);
* ASSERT (!ret);
*/
clear_and_free_obj (drv_session);
set_session_private_data (s, drv_id, NULL);
hash_unset (data->session_by_drv_id_and_sa_index, key.val);
}
ret = rte_cryptodev_sym_session_free(s);
ASSERT (!ret);
hash_unset (data->session_by_sa_index, sa_index);
u64 ts = unix_time_now_nsec ();
dpdk_crypto_session_disposal (data->session_disposal, ts);
crypto_session_disposal_t sd;
sd.ts = ts;
sd.session = s;
vec_add1 (data->session_disposal, sd);
}
/* *INDENT-ON* */
@@ -782,7 +806,11 @@ crypto_op_init (struct rte_mempool *mempool,
op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
op->phys_addr = rte_mem_virt2phy (_obj);
#if RTE_VERSION < RTE_VERSION_NUM(17, 11, 0, 0)
op->phys_addr = rte_mempool_virt2phy (NULL, _obj);
#else
op->phys_addr = rte_mempool_virt2iova (_obj);
#endif
op->mempool = mempool;
}
@@ -985,6 +1013,8 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
return 0;
}
dcm->session_timeout = 10e9;
vec_validate_init_empty_aligned (dcm->workers_main, n_mains - 1,
(crypto_worker_main_t) EMPTY_STRUCT,
CLIB_CACHE_LINE_BYTES);
+19 -10
View File
@@ -56,16 +56,15 @@ typedef struct
typedef struct
{
dpdk_gcm_cnt_blk cb;
u8 aad[12];
u32 next;
dpdk_gcm_cnt_blk cb __attribute__ ((aligned (16)));
u8 aad[16];
u8 icv[32];
} dpdk_op_priv_t __attribute__ ((aligned (16)));
} dpdk_op_priv_t;
typedef struct
{
u16 *resource_idx;
uword *session_by_drv_id_and_sa_index;
struct rte_crypto_op **ops;
u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
@@ -119,12 +118,20 @@ typedef struct
u32 bi[VLIB_FRAME_SIZE];
} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
/* Entry on the deferred session-disposal queue: a cryptodev session
 * scheduled to be freed once it has aged past dcm->session_timeout. */
typedef struct
{
  u64 ts;	/* enqueue timestamp, nsec (from unix_time_now_nsec) */
  struct rte_cryptodev_sym_session *session;	/* session awaiting disposal */
} crypto_session_disposal_t;
typedef struct
{
struct rte_mempool *crypto_op;
struct rte_mempool *session_h;
struct rte_mempool **session_drv;
crypto_session_disposal_t *session_disposal;
uword *session_by_sa_index;
uword *session_by_drv_id_and_sa_index;
u64 crypto_op_get_failed;
u64 session_h_failed;
u64 *session_drv_failed;
@@ -140,7 +147,7 @@ typedef struct
crypto_alg_t *auth_algs;
crypto_data_t *data;
crypto_drv_t *drv;
u8 max_drv_id;
u64 session_timeout; /* nsec */
u8 enabled;
} dpdk_crypto_main_t;
@@ -158,7 +165,7 @@ clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session,
static_always_inline u32
crypto_op_len (void)
{
const u32 align = 16;
const u32 align = 4;
u32 op_size =
sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
@@ -200,12 +207,16 @@ crypto_get_session (struct rte_cryptodev_sym_session **session,
crypto_resource_t * res,
crypto_worker_main_t * cwm, u8 is_outbound)
{
dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
crypto_data_t *data;
uword *val;
crypto_session_key_t key = { 0 };
key.drv_id = res->drv_id;
key.sa_idx = sa_idx;
uword *val = hash_get (cwm->session_by_drv_id_and_sa_index, key.val);
data = vec_elt_at_index (dcm->data, res->numa);
val = hash_get (data->session_by_drv_id_and_sa_index, key.val);
if (PREDICT_FALSE (!val))
return create_sym_session (session, sa_idx, res, cwm, is_outbound);
@@ -314,10 +325,8 @@ static_always_inline void
crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
struct rte_crypto_op *op, void *session,
u32 cipher_off, u32 cipher_len,
u8 * icb __clib_unused, u32 iv_size __clib_unused,
u32 auth_off, u32 auth_len,
u8 * aad __clib_unused, u32 aad_size __clib_unused,
u8 * digest, u64 digest_paddr, u32 digest_size __clib_unused)
u8 * aad, u8 * digest, u64 digest_paddr)
{
struct rte_crypto_sym_op *sym_op;