octeon: add crypto framework
Configure the crypto device and add crypto support in the control plane and
data plane.

Control plane:
- Handle vnet crypto key add and delete
- Register crypto async enqueue and dequeue handlers

Data plane: add encryption and decryption support for
- AES-GCM
- AES-CBC with HMAC-SHA1/256/384/512
- AES-CTR with SHA1
- 3DES-CBC with MD5 and SHA1/256/384/512

Type: feature

Signed-off-by: Nithinsen Kaithakadan <nkaithakadan@marvell.com>
Signed-off-by: Monendra Singh Kushwaha <kmonendra@marvell.com>
Change-Id: Ia9e16c61ed84800a59e0c932a4ba6aa1423c1ec8

Committed by: Damjan Marion
Parent: 17a918133b
Commit: 6937c0b2df
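
The handler registration described above is implemented in crypto.c, whose diff is suppressed further below. For orientation only, the sketch that follows shows roughly how a routine such as oct_init_crypto_engine_handlers () could wire the handlers declared in crypto.h into the vnet crypto layer; the vnet_crypto_register_* calls reflect the generic VPP async-crypto engine API and their use here is an assumption, not an excerpt from the plugin.

/* Sketch only (assumed, not copied from crypto.c): registers the OCTEON CPT
 * engine and hooks up the key, enqueue and dequeue handlers declared in
 * crypto.h. Engine name and priority are illustrative. */
#include <vnet/crypto/crypto.h>
#include <dev_octeon/crypto.h>

static int
example_register_octeon_crypto_engine (vlib_main_t *vm)
{
  u32 eidx = vnet_crypto_register_engine (vm, "oct_cpt", 100,
                                          "Marvell OCTEON CPT engine");

  /* control plane: key add/delete notifications */
  vnet_crypto_register_key_handler (vm, eidx, oct_crypto_key_handler);

  /* data plane: one async enqueue handler per op variant, one dequeue
   * handler shared by all of them (only one variant shown here) */
  vnet_crypto_register_enqueue_handler (
    vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC,
    oct_crypto_enqueue_aead_aad_8_enc);
  vnet_crypto_register_dequeue_handler (vm, eidx, oct_crypto_frame_dequeue);

  return 0;
}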

@@ -36,6 +36,7 @@ add_vpp_plugin(dev_octeon
  tx_node.c
  flow.c
  counter.c
+  crypto.c

  MULTIARCH_SOURCES
  rx_node.c

src/plugins/dev_octeon/crypto.c (new file, 1754 lines)
File diff suppressed because it is too large
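
Since the crypto.c diff is suppressed, the data-plane bookkeeping is not visible on this page. The sketch below is an illustration only (an assumption, not an excerpt) of how the oct_crypto_pending_queue_t ring declared in crypto.h below is presumably driven: enqueue handlers reserve the slot at enq_tail, the dequeue handler retires completed requests from deq_head, and OCT_MOD_INC wraps both indices around n_desc.

/* Illustration only: the example_* helpers are hypothetical; the fields and
 * OCT_MOD_INC come from crypto.h below. Polling of the per-element cpt_res_s
 * completion words is left out, since that logic lives in the suppressed
 * crypto.c. */
static_always_inline oct_crypto_inflight_req_t *
example_pend_q_reserve (oct_crypto_pending_queue_t *pend_q)
{
  oct_crypto_inflight_req_t *infl_req;

  if (pend_q->n_crypto_inflight == pend_q->n_desc)
    return 0; /* ring full: caller must retry or drop the frame */

  infl_req = &pend_q->req_queue[pend_q->enq_tail];
  OCT_MOD_INC (pend_q->enq_tail, pend_q->n_desc);
  pend_q->n_crypto_inflight++;
  return infl_req;
}

static_always_inline void
example_pend_q_retire (oct_crypto_pending_queue_t *pend_q)
{
  /* called once all elts of req_queue[deq_head] have completed */
  OCT_MOD_INC (pend_q->deq_head, pend_q->n_desc);
  pend_q->n_crypto_inflight--;
}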

src/plugins/dev_octeon/crypto.h (new file, 174 lines)
@@ -0,0 +1,174 @@
/*
 * Copyright (c) 2024 Marvell.
 * SPDX-License-Identifier: Apache-2.0
 * https://spdx.org/licenses/Apache-2.0.html
 */

#ifndef _CRYPTO_H_
#define _CRYPTO_H_
#include <vnet/crypto/crypto.h>
#include <vnet/ip/ip.h>

#define OCT_MAX_N_CPT_DEV 2

#define OCT_CPT_LF_MAX_NB_DESC 128000

/* CRYPTO_ID, KEY_LENGTH_IN_BYTES, TAG_LEN, AAD_LEN */
#define foreach_oct_crypto_aead_async_alg \
  _ (AES_128_GCM, 16, 16, 8) \
  _ (AES_128_GCM, 16, 16, 12) \
  _ (AES_192_GCM, 24, 16, 8) \
  _ (AES_192_GCM, 24, 16, 12) \
  _ (AES_256_GCM, 32, 16, 8) \
  _ (AES_256_GCM, 32, 16, 12)

/* CRYPTO_ID, INTEG_ID, KEY_LENGTH_IN_BYTES, DIGEST_LEN */
#define foreach_oct_crypto_link_async_alg \
  _ (AES_128_CBC, SHA1, 16, 12) \
  _ (AES_192_CBC, SHA1, 24, 12) \
  _ (AES_256_CBC, SHA1, 32, 12) \
  _ (AES_128_CBC, SHA256, 16, 16) \
  _ (AES_192_CBC, SHA256, 24, 16) \
  _ (AES_256_CBC, SHA256, 32, 16) \
  _ (AES_128_CBC, SHA384, 16, 24) \
  _ (AES_192_CBC, SHA384, 24, 24) \
  _ (AES_256_CBC, SHA384, 32, 24) \
  _ (AES_128_CBC, SHA512, 16, 32) \
  _ (AES_192_CBC, SHA512, 24, 32) \
  _ (AES_256_CBC, SHA512, 32, 32) \
  _ (3DES_CBC, MD5, 24, 12) \
  _ (3DES_CBC, SHA1, 24, 12) \
  _ (3DES_CBC, SHA256, 24, 16) \
  _ (3DES_CBC, SHA384, 24, 24) \
  _ (3DES_CBC, SHA512, 24, 32) \
  _ (AES_128_CTR, SHA1, 16, 12) \
  _ (AES_192_CTR, SHA1, 24, 12) \
  _ (AES_256_CTR, SHA1, 32, 12)

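/*
 * Editorial note: OCT_MOD_INC (i, l) below is a wrap-around increment. It
 * advances a ring index i and resets it to 0 once it reaches l - 1;
 * presumably it is what keeps the pending-queue indices (enq_tail, deq_head)
 * within n_desc.
 */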
#define OCT_MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)

#define OCT_SCATTER_GATHER_BUFFER_SIZE 1024

#define CPT_LMT_SIZE_COPY (sizeof (struct cpt_inst_s) / 16)
#define OCT_MAX_LMT_SZ 16

#define SRC_IOV_SIZE \
  (sizeof (struct roc_se_iov_ptr) + \
   (sizeof (struct roc_se_buf_ptr) * ROC_MAX_SG_CNT))

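/*
 * Editorial note: OCT_CPT_LMT_GET_LINE_ADDR below returns the address of
 * LMT line lmt_num inside an LMT region, i.e.
 * lmt_addr + lmt_num * 2^ROC_LMT_LINE_SIZE_LOG2.
 */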
#define OCT_CPT_LMT_GET_LINE_ADDR(lmt_addr, lmt_num) \
  (void *) ((u64) (lmt_addr) + ((u64) (lmt_num) << ROC_LMT_LINE_SIZE_LOG2))

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct roc_cpt *roc_cpt;
  struct roc_cpt_lmtline lmtline;
  struct roc_cpt_lf lf;
  vnet_dev_t *dev;
} oct_crypto_dev_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /** CPT opcode */
  u16 cpt_op : 4;
  /** Flag for AES GCM */
  u16 aes_gcm : 1;
  /** IV length in bytes */
  u8 iv_length;
  /** Auth IV length in bytes */
  u8 auth_iv_length;
  /** IV offset in bytes */
  u16 iv_offset;
  /** Auth IV offset in bytes */
  u16 auth_iv_offset;
  /** CPT inst word 7 */
  u64 cpt_inst_w7;
  /* Initialised as part of the first packet */
  u8 initialised;
  /* Stores the link key index in case of linked algorithms */
  vnet_crypto_key_index_t key_index;
  oct_crypto_dev_t *crypto_dev;
  struct roc_se_ctx cpt_ctx;
} oct_crypto_sess_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  oct_crypto_sess_t *sess;
  oct_crypto_dev_t *crypto_dev;
} oct_crypto_key_t;

typedef struct oct_crypto_scatter_gather
{
  u8 buf[OCT_SCATTER_GATHER_BUFFER_SIZE];
} oct_crypto_scatter_gather_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /** Result data of all entries in the frame */
  volatile union cpt_res_s res[VNET_CRYPTO_FRAME_SIZE];
  /** Scatter gather data */
  void *sg_data;
  /** Frame pointer */
  vnet_crypto_async_frame_t *frame;
  /** Number of async elements in frame */
  u16 elts;
  /** Next entry in frame to be read at dequeue time */
  u16 deq_elts;
} oct_crypto_inflight_req_t;

typedef struct
{
  /** Array of pending requests */
  oct_crypto_inflight_req_t *req_queue;
  /** Number of inflight operations in queue */
  u32 n_crypto_inflight;
  /** Tail of queue, used for enqueue */
  u16 enq_tail;
  /** Head of queue, used for dequeue */
  u16 deq_head;
  /** Number of descriptors */
  u16 n_desc;
} oct_crypto_pending_queue_t;

typedef struct
{
  oct_crypto_dev_t *crypto_dev[OCT_MAX_N_CPT_DEV];
  oct_crypto_key_t *keys[VNET_CRYPTO_ASYNC_OP_N_TYPES];
  oct_crypto_pending_queue_t *pend_q;
  int n_cpt;
  u8 started;
} oct_crypto_main_t;

extern oct_crypto_main_t oct_crypto_main;

void oct_crypto_key_del_handler (vlib_main_t *vm,
                                 vnet_crypto_key_index_t key_index);

void oct_crypto_key_add_handler (vlib_main_t *vm,
                                 vnet_crypto_key_index_t key_index);

void oct_crypto_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                             vnet_crypto_key_index_t idx);

int oct_crypto_enqueue_linked_alg_enc (vlib_main_t *vm,
                                       vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_linked_alg_dec (vlib_main_t *vm,
                                       vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_aead_aad_8_enc (vlib_main_t *vm,
                                       vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_aead_aad_12_enc (vlib_main_t *vm,
                                        vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_aead_aad_8_dec (vlib_main_t *vm,
                                       vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_aead_aad_12_dec (vlib_main_t *vm,
                                        vnet_crypto_async_frame_t *frame);
vnet_crypto_async_frame_t *oct_crypto_frame_dequeue (vlib_main_t *vm,
                                                     u32 *nb_elts_processed,
                                                     u32 *enqueue_thread_idx);
int oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev);
int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev);
#endif /* _CRYPTO_H_ */
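
The foreach_oct_crypto_* lists above are X-macros: a consumer defines _ locally, expands the list, and undefines _ again. Below is a minimal sketch of that pattern for the AEAD list, assuming the VNET_CRYPTO_OP_<alg>_TAG<n>_AAD<n>_ENC async-op naming used by vnet crypto; the table and its entry type are hypothetical and only illustrate how crypto.c could enumerate the supported variants.

/* Sketch only: expand the AEAD list into a compile-time table of variants. */
typedef struct
{
  vnet_crypto_async_op_id_t op;
  u32 key_len, tag_len, aad_len;
} example_oct_aead_variant_t;

static const example_oct_aead_variant_t example_oct_aead_variants[] = {
#define _(n, k, t, a) { VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, k, t, a },
  foreach_oct_crypto_aead_async_alg
#undef _
};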

@@ -10,6 +10,7 @@
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <dev_octeon/octeon.h>
+#include <dev_octeon/crypto.h>

#include <base/roc_api.h>
#include <common.h>

@@ -54,7 +55,9 @@ static struct
  _ (0xa064, RVU_VF, "Marvell Octeon Resource Virtualization Unit VF"),
  _ (0xa0f8, LBK_VF, "Marvell Octeon Loopback Unit VF"),
  _ (0xa0f7, SDP_VF, "Marvell Octeon System DPI Packet Interface Unit VF"),
-  _ (0xa0f3, CPT_VF, "Marvell Octeon Cryptographic Accelerator Unit VF"),
+  _ (0xa0f3, O10K_CPT_VF,
+     "Marvell Octeon-10 Cryptographic Accelerator Unit VF"),
+  _ (0xa0fe, O9K_CPT_VF, "Marvell Octeon-9 Cryptographic Accelerator Unit VF"),
#undef _
};

@@ -191,17 +194,113 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev)
  return vnet_dev_port_add (vm, dev, 0, &port_add_args);
}

static int
oct_conf_cpt (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd,
              int nb_lf)
{
  struct roc_cpt *roc_cpt = ocd->roc_cpt;
  int rrv;

  if ((rrv = roc_cpt_eng_grp_add (roc_cpt, CPT_ENG_TYPE_SE)) < 0)
    {
      log_err (dev, "Could not add CPT SE engines");
      return cnx_return_roc_err (dev, rrv, "roc_cpt_eng_grp_add");
    }
  if ((rrv = roc_cpt_eng_grp_add (roc_cpt, CPT_ENG_TYPE_IE)) < 0)
    {
      log_err (dev, "Could not add CPT IE engines");
      return cnx_return_roc_err (dev, rrv, "roc_cpt_eng_grp_add");
    }
  if (roc_cpt->eng_grp[CPT_ENG_TYPE_IE] != ROC_CPT_DFLT_ENG_GRP_SE_IE)
    {
      log_err (dev, "Invalid CPT IE engine group configuration");
      return -1;
    }
  if (roc_cpt->eng_grp[CPT_ENG_TYPE_SE] != ROC_CPT_DFLT_ENG_GRP_SE)
    {
      log_err (dev, "Invalid CPT SE engine group configuration");
      return -1;
    }
  if ((rrv = roc_cpt_dev_configure (roc_cpt, nb_lf, false, 0)) < 0)
    {
      log_err (dev, "could not configure crypto device %U",
               format_vlib_pci_addr, roc_cpt->pci_dev->addr);
      return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_configure");
    }
  return 0;
}

static vnet_dev_rv_t
oct_conf_cpt_queue (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd)
{
  struct roc_cpt *roc_cpt = ocd->roc_cpt;
  struct roc_cpt_lmtline *cpt_lmtline;
  struct roc_cpt_lf *cpt_lf;
  int rrv;

  cpt_lf = &ocd->lf;
  cpt_lmtline = &ocd->lmtline;

  cpt_lf->nb_desc = OCT_CPT_LF_MAX_NB_DESC;
  cpt_lf->lf_id = 0;
  if ((rrv = roc_cpt_lf_init (roc_cpt, cpt_lf)) < 0)
    return cnx_return_roc_err (dev, rrv, "roc_cpt_lf_init");

  roc_cpt_iq_enable (cpt_lf);

  if ((rrv = roc_cpt_lmtline_init (roc_cpt, cpt_lmtline, 0)) < 0)
    return cnx_return_roc_err (dev, rrv, "roc_cpt_lmtline_init");

  return 0;
}

static vnet_dev_rv_t
oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
{
  oct_crypto_main_t *ocm = &oct_crypto_main;
  extern oct_plt_init_param_t oct_plt_init_param;
  oct_device_t *cd = vnet_dev_get_data (dev);
  oct_crypto_dev_t *ocd = NULL;
  int rrv;
-  struct roc_cpt cpt = {
-    .pci_dev = &cd->plt_pci_dev,
-  };

-  if ((rrv = roc_cpt_dev_init (&cpt)))
  if (ocm->n_cpt == OCT_MAX_N_CPT_DEV || ocm->started)
    return VNET_DEV_ERR_NOT_SUPPORTED;

  ocd = oct_plt_init_param.oct_plt_zmalloc (sizeof (oct_crypto_dev_t),
                                            CLIB_CACHE_LINE_BYTES);

  ocd->roc_cpt = oct_plt_init_param.oct_plt_zmalloc (sizeof (struct roc_cpt),
                                                     CLIB_CACHE_LINE_BYTES);
  ocd->roc_cpt->pci_dev = &cd->plt_pci_dev;

  ocd->dev = dev;

  if ((rrv = roc_cpt_dev_init (ocd->roc_cpt)))
    return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_init");

  if ((rrv = oct_conf_cpt (vm, dev, ocd, 1)))
    return rrv;

  if ((rrv = oct_conf_cpt_queue (vm, dev, ocd)))
    return rrv;

  if (!ocm->n_cpt)
    {
      /*
       * Initialize s/w queues, which are common across multiple
       * crypto devices
       */
      oct_conf_sw_queue (vm, dev);

      ocm->crypto_dev[0] = ocd;
    }

  ocm->crypto_dev[1] = ocd;

  oct_init_crypto_engine_handlers (vm, dev);

  ocm->n_cpt++;

  return VNET_DEV_OK;
}

@@ -256,7 +355,8 @@ oct_init (vlib_main_t *vm, vnet_dev_t *dev)
    case OCT_DEVICE_TYPE_SDP_VF:
      return oct_init_nix (vm, dev);

-    case OCT_DEVICE_TYPE_CPT_VF:
+    case OCT_DEVICE_TYPE_O10K_CPT_VF:
+    case OCT_DEVICE_TYPE_O9K_CPT_VF:
      return oct_init_cpt (vm, dev);

    default:

@@ -30,7 +30,8 @@ typedef enum
  OCT_DEVICE_TYPE_RVU_VF,
  OCT_DEVICE_TYPE_LBK_VF,
  OCT_DEVICE_TYPE_SDP_VF,
-  OCT_DEVICE_TYPE_CPT_VF,
+  OCT_DEVICE_TYPE_O10K_CPT_VF,
+  OCT_DEVICE_TYPE_O9K_CPT_VF,
} __clib_packed oct_device_type_t;

typedef struct

@@ -41,7 +42,6 @@ typedef struct
  u8 full_duplex : 1;
  u32 speed;
  struct plt_pci_device plt_pci_dev;
-  struct roc_cpt cpt;
  struct roc_nix *nix;
} oct_device_t;

@@ -102,7 +102,6 @@ typedef struct
  u64 aura_handle;
  u64 io_addr;
  void *lmt_addr;

  oct_npa_batch_alloc_cl128_t *ba_buffer;
  u8 ba_first_cl;
  u8 ba_num_cl;

@@ -75,13 +75,12 @@ oct_drv_physmem_alloc (vlib_main_t *vm, u32 size, u32 align)

  if (align)
    {
-      /* Force cache line alloc in case alignment is less than cache line */
-      align = align < CLIB_CACHE_LINE_BYTES ? CLIB_CACHE_LINE_BYTES : align;
+      /* Force ROC align alloc in case alignment is less than ROC align */
+      align = align < ROC_ALIGN ? ROC_ALIGN : align;
      mem = vlib_physmem_alloc_aligned_on_numa (vm, size, align, 0);
    }
  else
-    mem =
-      vlib_physmem_alloc_aligned_on_numa (vm, size, CLIB_CACHE_LINE_BYTES, 0);
+    mem = vlib_physmem_alloc_aligned_on_numa (vm, size, ROC_ALIGN, 0);

  if (!mem)
    return NULL;