crypto-native: add SHA2-HMAC

Type: feature
Change-Id: I9e7ebf43536c972a62621fc7ad7406abec0ce071
Signed-off-by: Damjan Marion <damarion@cisco.com>
Damjan Marion
2024-03-15 18:33:34 +00:00
committed by Ole Trøan
parent 18c9f14037
commit 9f2799fda4
7 changed files with 419 additions and 209 deletions

View File

@@ -12,8 +12,8 @@
# limitations under the License.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
list(APPEND VARIANTS "slm\;-march=silvermont")
list(APPEND VARIANTS "hsw\;-march=haswell")
list(APPEND VARIANTS "slm\;-march=silvermont -maes")
list(APPEND VARIANTS "hsw\;-march=haswell -maes")
if(compiler_flag_march_skylake_avx512 AND compiler_flag_mprefer_vector_width_256)
list(APPEND VARIANTS "skx\;-march=skylake-avx512 -mprefer-vector-width=256")
endif()
@@ -23,16 +23,15 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
if(compiler_flag_march_alderlake)
list(APPEND VARIANTS "adl\;-march=alderlake -mprefer-vector-width=256")
endif()
set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c)
set (COMPILE_OPTS -Wall -fno-common -maes)
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
list(APPEND VARIANTS "armv8\;-march=armv8.1-a+crc+crypto")
set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c)
set (COMPILE_OPTS -Wall -fno-common)
endif()
set (COMPILE_FILES aes_cbc.c aes_gcm.c aes_ctr.c sha2.c)
set (COMPILE_OPTS -Wall -fno-common)
if (NOT VARIANTS)
return()
endif()
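
Each VARIANTS entry above is a "suffix;flags" pair, and every file in COMPILE_FILES is compiled once per variant with those flags, so the new sha2.c is built for every march variant; -maes moves from the shared COMPILE_OPTS into the slm/hsw variant flags, keeping the common options free of AES-specific flags. Within a single source file, the active variant is visible through compiler-predefined feature macros; a minimal standalone sketch (illustrative only, not part of the commit):

/* sketch: how one translation unit can observe which -march variant
   it is being built for, via the compiler's predefined macros */
#include <stdio.h>

int
main (void)
{
#if defined(__AVX512F__)
  puts ("skx/icl-class build: AVX-512 enabled at compile time");
#elif defined(__AVX2__)
  puts ("hsw/adl-class build: AVX2 enabled at compile time");
#else
  puts ("baseline (slm-class) build");
#endif
  return 0;
}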

View File

@@ -249,18 +249,30 @@ decrypt:
return n_ops;
}
#define foreach_aes_cbc_handler_type _(128) _(192) _(256)
#define _(x) \
static u32 aes_ops_dec_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
static u32 aes_ops_enc_aes_cbc_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); } \
foreach_aes_cbc_handler_type;
#undef _
static int
aes_cbc_cpu_probe ()
{
#if defined(__VAES__) && defined(__AVX512F__)
if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
return 50;
#elif defined(__VAES__)
if (clib_cpu_supports_vaes ())
return 40;
#elif defined(__AVX512F__)
if (clib_cpu_supports_avx512f ())
return 30;
#elif defined(__AVX2__)
if (clib_cpu_supports_avx2 ())
return 20;
#elif __AES__
if (clib_cpu_supports_aes ())
return 10;
#elif __aarch64__
if (clib_cpu_supports_aarch64_aes ())
return 10;
#endif
return -1;
}
static void *
aes_cbc_key_exp_128 (vnet_crypto_key_t *key)
@@ -289,43 +301,39 @@ aes_cbc_key_exp_256 (vnet_crypto_key_t *key)
return kd;
}
#include <fcntl.h>
#define foreach_aes_cbc_handler_type _ (128) _ (192) _ (256)
clib_error_t *
#if defined(__VAES__) && defined(__AVX512F__)
crypto_native_aes_cbc_init_icl (vlib_main_t *vm)
#elif defined(__VAES__)
crypto_native_aes_cbc_init_adl (vlib_main_t *vm)
#elif __AVX512F__
crypto_native_aes_cbc_init_skx (vlib_main_t * vm)
#elif __aarch64__
crypto_native_aes_cbc_init_neon (vlib_main_t * vm)
#elif __AVX2__
crypto_native_aes_cbc_init_hsw (vlib_main_t * vm)
#else
crypto_native_aes_cbc_init_slm (vlib_main_t * vm)
#endif
{
crypto_native_main_t *cm = &crypto_native_main;
#define _(x) \
static u32 aes_ops_enc_aes_cbc_##x (vlib_main_t *vm, \
vnet_crypto_op_t *ops[], u32 n_ops) \
{ \
return aes_ops_enc_aes_cbc (vm, ops, n_ops, AES_KEY_##x); \
} \
\
CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_enc) = { \
.op_id = VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
.fn = aes_ops_enc_aes_cbc_##x, \
.probe = aes_cbc_cpu_probe, \
}; \
\
static u32 aes_ops_dec_aes_cbc_##x (vlib_main_t *vm, \
vnet_crypto_op_t *ops[], u32 n_ops) \
{ \
return aes_ops_dec_aes_cbc (vm, ops, n_ops, AES_KEY_##x); \
} \
\
CRYPTO_NATIVE_OP_HANDLER (aes_##x##_cbc_dec) = { \
.op_id = VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
.fn = aes_ops_dec_aes_cbc_##x, \
.probe = aes_cbc_cpu_probe, \
}; \
\
CRYPTO_NATIVE_KEY_HANDLER (aes_##x##_cbc) = { \
.alg_id = VNET_CRYPTO_ALG_AES_##x##_CBC, \
.key_fn = aes_cbc_key_exp_##x, \
.probe = aes_cbc_cpu_probe, \
};
#define _(x) \
vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
aes_ops_enc_aes_cbc_##x); \
vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
aes_ops_dec_aes_cbc_##x); \
cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CBC] = aes_cbc_key_exp_##x;
foreach_aes_cbc_handler_type;
#undef _
return 0;
}
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
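
The rewritten registration above is driven by an X-macro: foreach_aes_cbc_handler_type expands the caller-supplied `_` once per key size, so a single `_` definition stamps out the 128/192/256 handler functions together with their CRYPTO_NATIVE_OP_HANDLER and CRYPTO_NATIVE_KEY_HANDLER entries. A minimal standalone sketch of the expansion idiom (hypothetical names):

/* sketch of the X-macro idiom used above */
#include <stdio.h>

#define foreach_key_size _ (128) _ (192) _ (256)

#define _(x) \
  static int handler_##x (void) { return x; }
foreach_key_size
#undef _

int
main (void)
{
  /* three functions were generated by the single `_` definition */
  printf ("%d %d %d\n", handler_128 (), handler_192 (), handler_256 ());
  return 0;
}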

View File

@@ -81,32 +81,50 @@ aes_ctr_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
foreach_aes_ctr_handler_type;
#undef _
clib_error_t *
#if defined(__VAES__) && defined(__AVX512F__)
crypto_native_aes_ctr_init_icl (vlib_main_t *vm)
#elif defined(__VAES__)
crypto_native_aes_ctr_init_adl (vlib_main_t *vm)
#elif __AVX512F__
crypto_native_aes_ctr_init_skx (vlib_main_t *vm)
#elif __AVX2__
crypto_native_aes_ctr_init_hsw (vlib_main_t *vm)
#elif __aarch64__
crypto_native_aes_ctr_init_neon (vlib_main_t *vm)
#else
crypto_native_aes_ctr_init_slm (vlib_main_t *vm)
#endif
static int
probe ()
{
crypto_native_main_t *cm = &crypto_native_main;
#define _(x) \
vnet_crypto_register_ops_handlers ( \
vm, cm->crypto_engine_index, VNET_CRYPTO_OP_AES_##x##_CTR_ENC, \
aes_ops_aes_ctr_##x, aes_ops_aes_ctr_##x##_chained); \
vnet_crypto_register_ops_handlers ( \
vm, cm->crypto_engine_index, VNET_CRYPTO_OP_AES_##x##_CTR_DEC, \
aes_ops_aes_ctr_##x, aes_ops_aes_ctr_##x##_chained); \
cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_CTR] = aes_ctr_key_exp_##x;
foreach_aes_ctr_handler_type;
#undef _
return 0;
#if defined(__VAES__) && defined(__AVX512F__)
if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
return 50;
#elif defined(__VAES__)
if (clib_cpu_supports_vaes ())
return 40;
#elif defined(__AVX512F__)
if (clib_cpu_supports_avx512f ())
return 30;
#elif defined(__AVX2__)
if (clib_cpu_supports_avx2 ())
return 20;
#elif __AES__
if (clib_cpu_supports_aes ())
return 10;
#elif __aarch64__
if (clib_cpu_supports_aarch64_aes ())
return 10;
#endif
return -1;
}
#define _(b) \
CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_enc) = { \
.op_id = VNET_CRYPTO_OP_AES_##b##_CTR_ENC, \
.fn = aes_ops_aes_ctr_##b, \
.cfn = aes_ops_aes_ctr_##b##_chained, \
.probe = probe, \
}; \
\
CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_dec) = { \
.op_id = VNET_CRYPTO_OP_AES_##b##_CTR_DEC, \
.fn = aes_ops_aes_ctr_##b, \
.cfn = aes_ops_aes_ctr_##b##_chained, \
.probe = probe, \
}; \
CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_ctr) = { \
.alg_id = VNET_CRYPTO_ALG_AES_##b##_CTR, \
.key_fn = aes_ctr_key_exp_##b, \
.probe = probe, \
};
_ (128) _ (192) _ (256)
#undef _
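
Both the CTR ENC and DEC op ids above are registered with the same aes_ops_aes_ctr_##b function. That is sound for CTR mode, where encryption and decryption are the identical operation: XOR the data with the AES-generated keystream. A toy sketch of that symmetry (plain XOR keystream, no real AES):

/* sketch: applying the same CTR-style routine twice with the same
   keystream round-trips the data */
#include <stdio.h>
#include <string.h>

static void
xcrypt (const unsigned char *ks, const unsigned char *in,
	unsigned char *out, size_t n)
{
  for (size_t i = 0; i < n; i++)
    out[i] = in[i] ^ ks[i];
}

int
main (void)
{
  unsigned char ks[4] = { 0xaa, 0xbb, 0xcc, 0xdd };
  unsigned char pt[4] = "abc", ct[4], rt[4];
  xcrypt (ks, pt, ct, sizeof (pt)); /* "encrypt" */
  xcrypt (ks, ct, rt, sizeof (ct)); /* "decrypt" with the same function */
  printf ("round-trip ok: %s\n",
	  memcmp (pt, rt, sizeof (pt)) == 0 ? "yes" : "no");
  return 0;
}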

View File

@@ -118,40 +118,49 @@ aes_gcm_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
foreach_aes_gcm_handler_type;
#undef _
clib_error_t *
#if defined(__VAES__) && defined(__AVX512F__)
crypto_native_aes_gcm_init_icl (vlib_main_t *vm)
#elif defined(__VAES__)
crypto_native_aes_gcm_init_adl (vlib_main_t *vm)
#elif __AVX512F__
crypto_native_aes_gcm_init_skx (vlib_main_t *vm)
#elif __AVX2__
crypto_native_aes_gcm_init_hsw (vlib_main_t *vm)
#elif __aarch64__
crypto_native_aes_gcm_init_neon (vlib_main_t *vm)
#else
crypto_native_aes_gcm_init_slm (vlib_main_t *vm)
#endif
static int
probe ()
{
crypto_native_main_t *cm = &crypto_native_main;
#define _(x) \
vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
aes_ops_enc_aes_gcm_##x); \
vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
aes_ops_dec_aes_gcm_##x); \
cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aes_gcm_key_exp_##x;
foreach_aes_gcm_handler_type;
#undef _
return 0;
#if defined(__VAES__) && defined(__AVX512F__)
if (clib_cpu_supports_vpclmulqdq () && clib_cpu_supports_vaes () &&
clib_cpu_supports_avx512f ())
return 50;
#elif defined(__VAES__)
if (clib_cpu_supports_vpclmulqdq () && clib_cpu_supports_vaes ())
return 40;
#elif defined(__AVX512F__)
if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_avx512f ())
return 30;
#elif defined(__AVX2__)
if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_avx2 ())
return 20;
#elif __AES__
if (clib_cpu_supports_pclmulqdq () && clib_cpu_supports_aes ())
return 10;
#elif __aarch64__
if (clib_cpu_supports_aarch64_aes ())
return 10;
#endif
return -1;
}
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
#define _(b) \
CRYPTO_NATIVE_OP_HANDLER (aes_##b##_gcm_enc) = { \
.op_id = VNET_CRYPTO_OP_AES_##b##_GCM_ENC, \
.fn = aes_ops_enc_aes_gcm_##b, \
.probe = probe, \
}; \
\
CRYPTO_NATIVE_OP_HANDLER (aes_##b##_gcm_dec) = { \
.op_id = VNET_CRYPTO_OP_AES_##b##_GCM_DEC, \
.fn = aes_ops_dec_aes_gcm_##b, \
.probe = probe, \
}; \
CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_gcm) = { \
.alg_id = VNET_CRYPTO_ALG_AES_##b##_GCM, \
.key_fn = aes_gcm_key_exp_##b, \
.probe = probe, \
};
_ (128) _ (192) _ (256)
#undef _
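
Unlike the CBC and CTR probes, the GCM probe also requires a carry-less multiply extension (VPCLMULQDQ or PCLMULQDQ), since GHASH is computed with carry-less multiplication. Each branch pairs a compile-time guard (which variant this object was built as) with a runtime check (whether this CPU actually has the feature). A reduced standalone sketch, with the runtime checks stubbed out (cpu_has_* stand in for the real clib_cpu_supports_* calls):

/* sketch of the compile-time + runtime probe pattern */
#include <stdio.h>

static int cpu_has_pclmulqdq (void) { return 1; } /* stub */
static int cpu_has_aes (void) { return 1; }	  /* stub */

static int
probe (void)
{
#if defined(__PCLMUL__) && defined(__AES__)
  /* this object was built with -mpclmul and -maes; only ask the CPU */
  if (cpu_has_pclmulqdq () && cpu_has_aes ())
    return 10;
#endif
  return -1; /* variant unusable here; registration is skipped */
}

int
main (void)
{
  printf ("probe priority: %d\n", probe ());
  return 0;
}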

View File

@@ -19,33 +19,66 @@
#define __crypto_native_h__
typedef void *(crypto_native_key_fn_t) (vnet_crypto_key_t * key);
typedef int (crypto_native_variant_probe_t) ();
typedef struct crypto_native_op_handler
{
struct crypto_native_op_handler *next;
vnet_crypto_op_id_t op_id;
vnet_crypto_ops_handler_t *fn;
vnet_crypto_chained_ops_handler_t *cfn;
crypto_native_variant_probe_t *probe;
int priority;
} crypto_native_op_handler_t;
typedef struct crypto_native_key_handler
{
struct crypto_native_key_handler *next;
vnet_crypto_alg_t alg_id;
crypto_native_key_fn_t *key_fn;
crypto_native_variant_probe_t *probe;
int priority;
} crypto_native_key_handler_t;
typedef struct
{
u32 crypto_engine_index;
crypto_native_key_fn_t *key_fn[VNET_CRYPTO_N_ALGS];
void **key_data;
crypto_native_op_handler_t *op_handlers;
crypto_native_key_handler_t *key_handlers;
} crypto_native_main_t;
extern crypto_native_main_t crypto_native_main;
#define foreach_crypto_native_march_variant \
_ (slm) _ (hsw) _ (skx) _ (icl) _ (adl) _ (neon)
#define _(v) \
clib_error_t __clib_weak *crypto_native_aes_cbc_init_##v (vlib_main_t *vm); \
clib_error_t __clib_weak *crypto_native_aes_ctr_init_##v (vlib_main_t *vm); \
clib_error_t __clib_weak *crypto_native_aes_gcm_init_##v (vlib_main_t *vm);
foreach_crypto_native_march_variant;
#undef _
#define CRYPTO_NATIVE_OP_HANDLER(x) \
static crypto_native_op_handler_t __crypto_native_op_handler_##x; \
static void __clib_constructor __crypto_native_op_handler_cb_##x (void) \
{ \
crypto_native_main_t *cm = &crypto_native_main; \
int priority = __crypto_native_op_handler_##x.probe (); \
if (priority >= 0) \
{ \
__crypto_native_op_handler_##x.priority = priority; \
__crypto_native_op_handler_##x.next = cm->op_handlers; \
cm->op_handlers = &__crypto_native_op_handler_##x; \
} \
} \
static crypto_native_op_handler_t __crypto_native_op_handler_##x
#define CRYPTO_NATIVE_KEY_HANDLER(x) \
static crypto_native_key_handler_t __crypto_native_key_handler_##x; \
static void __clib_constructor __crypto_native_key_handler_cb_##x (void) \
{ \
crypto_native_main_t *cm = &crypto_native_main; \
int priority = __crypto_native_key_handler_##x.probe (); \
if (priority >= 0) \
{ \
__crypto_native_key_handler_##x.priority = priority; \
__crypto_native_key_handler_##x.next = cm->key_handlers; \
cm->key_handlers = &__crypto_native_key_handler_##x; \
} \
} \
static crypto_native_key_handler_t __crypto_native_key_handler_##x
#endif /* __crypto_native_h__ */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
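
CRYPTO_NATIVE_OP_HANDLER and CRYPTO_NATIVE_KEY_HANDLER replace the per-march weak init functions with self-registration: each macro declares a static descriptor plus a constructor (__clib_constructor wraps __attribute__ ((constructor))) that runs at load time, calls the descriptor's probe, and links the descriptor into a global list only when the probe reports a usable priority. A standalone sketch of the idiom (illustrative names, not the plugin's):

/* sketch of constructor-based self-registration */
#include <stdio.h>

typedef struct handler
{
  struct handler *next;
  const char *name;
  int (*probe) (void);
  int priority;
} handler_t;

static handler_t *handlers; /* global registration list */

#define REGISTER_HANDLER(x) \
  static handler_t h_##x; \
  __attribute__ ((constructor)) static void reg_##x (void) \
  { \
    int pri = h_##x.probe (); \
    if (pri >= 0) \
      { \
	h_##x.priority = pri; \
	h_##x.next = handlers; \
	handlers = &h_##x; \
      } \
  } \
  static handler_t h_##x

static int demo_probe (void) { return 10; }

REGISTER_HANDLER (demo) = { .name = "demo", .probe = demo_probe };

int
main (void)
{
  for (handler_t *h = handlers; h; h = h->next)
    printf ("%s registered with priority %d\n", h->name, h->priority);
  return 0;
}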

View File

@@ -63,95 +63,52 @@ clib_error_t *
crypto_native_init (vlib_main_t * vm)
{
crypto_native_main_t *cm = &crypto_native_main;
clib_error_t *error = 0;
if (clib_cpu_supports_x86_aes () == 0 &&
clib_cpu_supports_aarch64_aes () == 0)
if (cm->op_handlers == 0)
return 0;
cm->crypto_engine_index =
vnet_crypto_register_engine (vm, "native", 100,
"Native ISA Optimized Crypto");
if (0);
#if __x86_64__
else if (crypto_native_aes_cbc_init_icl && clib_cpu_supports_vaes () &&
clib_cpu_supports_avx512f ())
error = crypto_native_aes_cbc_init_icl (vm);
else if (crypto_native_aes_cbc_init_adl && clib_cpu_supports_vaes ())
error = crypto_native_aes_cbc_init_adl (vm);
else if (crypto_native_aes_cbc_init_skx && clib_cpu_supports_avx512f ())
error = crypto_native_aes_cbc_init_skx (vm);
else if (crypto_native_aes_cbc_init_hsw && clib_cpu_supports_avx2 ())
error = crypto_native_aes_cbc_init_hsw (vm);
else if (crypto_native_aes_cbc_init_slm)
error = crypto_native_aes_cbc_init_slm (vm);
#endif
#if __aarch64__
else if (crypto_native_aes_cbc_init_neon)
error = crypto_native_aes_cbc_init_neon (vm);
#endif
else
error = clib_error_return (0, "No AES CBC implemenation available");
crypto_native_op_handler_t *oh = cm->op_handlers;
crypto_native_key_handler_t *kh = cm->key_handlers;
crypto_native_op_handler_t **best_by_op_id = 0;
crypto_native_key_handler_t **best_by_alg_id = 0;
if (error)
return error;
if (0)
;
#if __x86_64__
else if (crypto_native_aes_ctr_init_icl && clib_cpu_supports_vaes () &&
clib_cpu_supports_avx512f ())
error = crypto_native_aes_ctr_init_icl (vm);
else if (crypto_native_aes_ctr_init_adl && clib_cpu_supports_vaes ())
error = crypto_native_aes_ctr_init_adl (vm);
else if (crypto_native_aes_ctr_init_skx && clib_cpu_supports_avx512f ())
error = crypto_native_aes_ctr_init_skx (vm);
else if (crypto_native_aes_ctr_init_hsw && clib_cpu_supports_avx2 ())
error = crypto_native_aes_ctr_init_hsw (vm);
else if (crypto_native_aes_ctr_init_slm)
error = crypto_native_aes_ctr_init_slm (vm);
#endif
#if __aarch64__
else if (crypto_native_aes_ctr_init_neon)
error = crypto_native_aes_ctr_init_neon (vm);
#endif
else
error = clib_error_return (0, "No AES CTR implemenation available");
if (error)
return error;
#if __x86_64__
if (clib_cpu_supports_pclmulqdq ())
while (oh)
{
if (crypto_native_aes_gcm_init_icl && clib_cpu_supports_vaes () &&
clib_cpu_supports_avx512f ())
error = crypto_native_aes_gcm_init_icl (vm);
else if (crypto_native_aes_gcm_init_adl && clib_cpu_supports_vaes ())
error = crypto_native_aes_gcm_init_adl (vm);
else if (crypto_native_aes_gcm_init_skx && clib_cpu_supports_avx512f ())
error = crypto_native_aes_gcm_init_skx (vm);
else if (crypto_native_aes_gcm_init_hsw && clib_cpu_supports_avx2 ())
error = crypto_native_aes_gcm_init_hsw (vm);
else if (crypto_native_aes_gcm_init_slm)
error = crypto_native_aes_gcm_init_slm (vm);
else
error = clib_error_return (0, "No AES GCM implemenation available");
vec_validate (best_by_op_id, oh->op_id);
if (error)
return error;
if (best_by_op_id[oh->op_id] == 0 ||
best_by_op_id[oh->op_id]->priority < oh->priority)
best_by_op_id[oh->op_id] = oh;
oh = oh->next;
}
#endif
#if __aarch64__
if (crypto_native_aes_gcm_init_neon)
error = crypto_native_aes_gcm_init_neon (vm);
else
error = clib_error_return (0, "No AES GCM implemenation available");
if (error)
return error;
#endif
while (kh)
{
vec_validate (best_by_alg_id, kh->alg_id);
if (best_by_alg_id[kh->alg_id] == 0 ||
best_by_alg_id[kh->alg_id]->priority < kh->priority)
best_by_alg_id[kh->alg_id] = kh;
kh = kh->next;
}
vec_foreach_pointer (oh, best_by_op_id)
if (oh)
vnet_crypto_register_ops_handlers (vm, cm->crypto_engine_index,
oh->op_id, oh->fn, oh->cfn);
vec_foreach_pointer (kh, best_by_alg_id)
if (kh)
cm->key_fn[kh->alg_id] = kh->key_fn;
vec_free (best_by_op_id);
vec_free (best_by_alg_id);
vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
crypto_native_key_handler);
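
With this change, crypto_native_init no longer walks a hard-coded ladder of weak init symbols; it scans the constructor-built lists and, per op id (and per alg id for key handlers), keeps the handler with the highest probe priority before registering it with the crypto layer. The core of that pass reduces to a max-by-priority walk; a minimal sketch:

/* sketch of the selection pass: highest probe priority wins */
#include <stdio.h>

typedef struct handler
{
  struct handler *next;
  int op_id;
  int priority;
  const char *name;
} handler_t;

int
main (void)
{
  /* two implementations of the same op id, e.g. aes-ni vs vaes */
  handler_t aesni = { 0, 1, 10, "aesni" };
  handler_t vaes = { &aesni, 1, 50, "vaes" };
  handler_t *best = 0;

  for (handler_t *h = &vaes; h; h = h->next)
    if (best == 0 || h->priority > best->priority)
      best = h;

  printf ("op %d -> %s\n", best->op_id, best->name);
  return 0;
}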

View File

@@ -0,0 +1,186 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright(c) 2024 Cisco Systems, Inc.
*/
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
#include <vppinfra/crypto/sha2.h>
static_always_inline u32
crypto_native_ops_hash_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
u32 n_ops, vnet_crypto_op_chunk_t *chunks,
clib_sha2_type_t type, int maybe_chained)
{
vnet_crypto_op_t *op = ops[0];
clib_sha2_ctx_t ctx;
u32 n_left = n_ops;
next:
if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
{
vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
clib_sha2_init (&ctx, type);
for (int j = 0; j < op->n_chunks; j++, chp++)
clib_sha2_update (&ctx, chp->src, chp->len);
clib_sha2_final (&ctx, op->digest);
}
else
clib_sha2 (type, op->src, op->len, op->digest);
op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
if (--n_left)
{
op += 1;
goto next;
}
return n_ops;
}
static_always_inline u32
crypto_native_ops_hmac_sha2 (vlib_main_t *vm, vnet_crypto_op_t *ops[],
u32 n_ops, vnet_crypto_op_chunk_t *chunks,
clib_sha2_type_t type)
{
crypto_native_main_t *cm = &crypto_native_main;
vnet_crypto_op_t *op = ops[0];
u32 n_left = n_ops;
clib_sha2_hmac_ctx_t ctx;
u8 buffer[64];
u32 sz, n_fail = 0;
for (; n_left; n_left--, op++)
{
clib_sha2_hmac_init (
&ctx, type, (clib_sha2_hmac_key_data_t *) cm->key_data[op->key_index]);
if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
{
vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
for (int j = 0; j < op->n_chunks; j++, chp++)
clib_sha2_hmac_update (&ctx, chp->src, chp->len);
}
else
clib_sha2_hmac_update (&ctx, op->src, op->len);
clib_sha2_hmac_final (&ctx, buffer);
if (op->digest_len)
{
sz = op->digest_len;
if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
{
if ((memcmp (op->digest, buffer, sz)))
{
n_fail++;
op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
continue;
}
}
else
clib_memcpy_fast (op->digest, buffer, sz);
}
else
{
sz = clib_sha2_variants[type].digest_size;
if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
{
if ((memcmp (op->digest, buffer, sz)))
{
n_fail++;
op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
continue;
}
}
else
clib_memcpy_fast (op->digest, buffer, sz);
}
op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
return n_ops - n_fail;
}
static void *
sha2_key_add (vnet_crypto_key_t *key, clib_sha2_type_t type)
{
clib_sha2_hmac_key_data_t *kd;
kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
clib_sha2_hmac_key_data (type, key->data, vec_len (key->data), kd);
return kd;
}
static int
probe ()
{
#if defined(__SHA__) && defined(__x86_64__)
if (clib_cpu_supports_sha ())
return 50;
#elif defined(__ARM_FEATURE_SHA2)
if (clib_cpu_supports_sha2 ())
return 10;
#endif
return -1;
}
#define _(b) \
static u32 crypto_native_ops_hash_sha##b ( \
vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
{ \
return crypto_native_ops_hash_sha2 (vm, ops, n_ops, 0, CLIB_SHA2_##b, 0); \
} \
\
static u32 crypto_native_ops_chained_hash_sha##b ( \
vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
u32 n_ops) \
{ \
return crypto_native_ops_hash_sha2 (vm, ops, n_ops, chunks, \
CLIB_SHA2_##b, 1); \
} \
\
static u32 crypto_native_ops_hmac_sha##b ( \
vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops) \
{ \
return crypto_native_ops_hmac_sha2 (vm, ops, n_ops, 0, CLIB_SHA2_##b); \
} \
\
static u32 crypto_native_ops_chained_hmac_sha##b ( \
vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
u32 n_ops) \
{ \
return crypto_native_ops_hmac_sha2 (vm, ops, n_ops, chunks, \
CLIB_SHA2_##b); \
} \
\
static void *sha2_##b##_key_add (vnet_crypto_key_t *k) \
{ \
return sha2_key_add (k, CLIB_SHA2_##b); \
} \
\
CRYPTO_NATIVE_OP_HANDLER (crypto_native_hash_sha##b) = { \
.op_id = VNET_CRYPTO_OP_SHA##b##_HASH, \
.fn = crypto_native_ops_hash_sha##b, \
.cfn = crypto_native_ops_chained_hash_sha##b, \
.probe = probe, \
}; \
CRYPTO_NATIVE_OP_HANDLER (crypto_native_hmac_sha##b) = { \
.op_id = VNET_CRYPTO_OP_SHA##b##_HMAC, \
.fn = crypto_native_ops_hmac_sha##b, \
.cfn = crypto_native_ops_chained_hmac_sha##b, \
.probe = probe, \
}; \
CRYPTO_NATIVE_KEY_HANDLER (crypto_native_hmac_sha##b) = { \
.alg_id = VNET_CRYPTO_ALG_HMAC_SHA##b, \
.key_fn = sha2_##b##_key_add, \
.probe = probe, \
};
_ (224)
_ (256)
#undef _
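
sha2_key_add front-loads the expensive half of HMAC: clib_sha2_hmac_key_data derives the inner/outer padded key state once at key-add time, so the per-op path only runs init/update/final against precomputed key data. A usage sketch of the vppinfra helpers as they appear above (VPP-internal API; signatures assumed from this file, compiles only inside the tree):

/* sketch: one-shot HMAC-SHA-256 with the helpers this file builds on */
#include <vppinfra/crypto/sha2.h>

static void
hmac_sha256_once (u8 *key, u32 key_len, u8 *msg, u32 msg_len,
		  u8 *digest /* 32 bytes */)
{
  clib_sha2_hmac_key_data_t kd;
  clib_sha2_hmac_ctx_t ctx;

  /* normally done once per key, in sha2_key_add () */
  clib_sha2_hmac_key_data (CLIB_SHA2_256, key, key_len, &kd);

  /* per-message path, as in crypto_native_ops_hmac_sha2 () */
  clib_sha2_hmac_init (&ctx, CLIB_SHA2_256, &kd);
  clib_sha2_hmac_update (&ctx, msg, msg_len);
  clib_sha2_hmac_final (&ctx, digest);
}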