crypto-native: add ARMv8 AES-CBC implementation

Type: feature

Change-Id: I32256061b9509880eec843db2f918879cdafbe47
Signed-off-by: Damjan Marion <dmarion@me.com>
Damjan Marion authored on 2020-01-31 10:24:07 +01:00; committed by Damjan Marion
parent 62b1cea6ed
commit 776644efe7
8 changed files with 762 additions and 367 deletions

@@ -11,27 +11,37 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
+  list(APPEND VARIANTS "sse42\;-march=silvermont")
+  list(APPEND VARIANTS "avx2\;-march=core-avx2")
+  if(compiler_flag_march_skylake_avx512)
+    list(APPEND VARIANTS "avx512\;-march=skylake-avx512")
+  endif()
+  if(compiler_flag_march_icelake_client)
+    list(APPEND VARIANTS "vaesni\;-march=icelake-client")
+  endif()
+  set (COMPILE_FILES aes_cbc.c aes_gcm.c)
+  set (COMPILE_OPTS -Wall -fno-common -maes)
+endif()
+
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
+  list(APPEND VARIANTS "armv8\;-march=native")
+  set (COMPILE_FILES aes_cbc.c)
+  set (COMPILE_OPTS -Wall -fno-common)
+endif()
+
+if (NOT VARIANTS)
   return()
 endif()
 
 add_vpp_plugin(crypto_native SOURCES main.c)
 
-list(APPEND VARIANTS "sse42\;-march=silvermont")
-list(APPEND VARIANTS "avx2\;-march=core-avx2")
-if(compiler_flag_march_skylake_avx512)
-  list(APPEND VARIANTS "avx512\;-march=skylake-avx512")
-endif()
-if(compiler_flag_march_icelake_client)
-  list(APPEND VARIANTS "vaesni\;-march=icelake-client")
-endif()
-
 foreach(VARIANT ${VARIANTS})
   list(GET VARIANT 0 v)
   list(GET VARIANT 1 f)
   set(l crypto_native_${v})
-  add_library(${l} OBJECT aes_cbc.c aes_gcm.c)
+  add_library(${l} OBJECT ${COMPILE_FILES})
   set_target_properties(${l} PROPERTIES POSITION_INDEPENDENT_CODE ON)
-  target_compile_options(${l} PUBLIC ${f} -Wall -fno-common -maes)
+  target_compile_options(${l} PUBLIC ${f} ${COMPILE_OPTS})
   target_sources(crypto_native_plugin PRIVATE $<TARGET_OBJECTS:${l}>)
 endforeach()

@@ -1,6 +1,6 @@
 /*
  *------------------------------------------------------------------
- * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Copyright (c) 2020 Cisco and/or its affiliates.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
@@ -28,6 +28,8 @@ typedef enum
 #define AES_KEY_ROUNDS(x) (10 + x * 2)
 #define AES_KEY_BYTES(x) (16 + x * 8)
 
+#ifdef __x86_64__
+
 static_always_inline u8x16
 aes_block_load (u8 * p)
 {
@@ -191,6 +193,133 @@ aes256_key_expand (u8x16 * key_schedule, u8 * key)
aes256_key_assist (k, 12, _mm_aeskeygenassist_si128 (k[11], 0x20));
aes256_key_assist (k, 14, _mm_aeskeygenassist_si128 (k[13], 0x40));
}
#endif
#ifdef __aarch64__
static_always_inline u8x16
aes_inv_mix_column (u8x16 a)
{
return vaesimcq_u8 (a);
}
static const u8x16 aese_prep_mask1 =
{ 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12 };
static const u8x16 aese_prep_mask2 =
{ 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15 };
static inline void
aes128_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
u8x16 r, t, last_round = rk[-1], z = { };
r = vqtbl1q_u8 (last_round, aese_prep_mask1);
r = vaeseq_u8 (r, z);
r ^= (u8x16) vdupq_n_u32 (rcon);
r ^= last_round;
r ^= t = vextq_u8 (z, last_round, 12);
r ^= t = vextq_u8 (z, t, 12);
r ^= vextq_u8 (z, t, 12);
rk[0] = r;
}
void
aes128_key_expand (u8x16 * rk, const u8 * k)
{
rk[0] = vld1q_u8 (k);
aes128_key_expand_round_neon (rk + 1, 0x01);
aes128_key_expand_round_neon (rk + 2, 0x02);
aes128_key_expand_round_neon (rk + 3, 0x04);
aes128_key_expand_round_neon (rk + 4, 0x08);
aes128_key_expand_round_neon (rk + 5, 0x10);
aes128_key_expand_round_neon (rk + 6, 0x20);
aes128_key_expand_round_neon (rk + 7, 0x40);
aes128_key_expand_round_neon (rk + 8, 0x80);
aes128_key_expand_round_neon (rk + 9, 0x1b);
aes128_key_expand_round_neon (rk + 10, 0x36);
}
static inline void
aes192_key_expand_round_neon (u8x8 * rk, u32 rcon)
{
u8x8 r, last_round = rk[-1], z = { };
u8x16 r2, z2 = { };
r2 = (u8x16) vdupq_lane_u64 ((uint64x1_t) last_round, 0);
r2 = vqtbl1q_u8 (r2, aese_prep_mask1);
r2 = vaeseq_u8 (r2, z2);
r2 ^= (u8x16) vdupq_n_u32 (rcon);
r = (u8x8) vdup_laneq_u64 ((u64x2) r2, 0);
r ^= rk[-3];
r ^= vext_u8 (z, rk[-3], 4);
rk[0] = r;
r = rk[-2] ^ vext_u8 (r, z, 4);
r ^= vext_u8 (z, r, 4);
rk[1] = r;
if (rcon == 0x80)
return;
r = rk[-1] ^ vext_u8 (r, z, 4);
r ^= vext_u8 (z, r, 4);
rk[2] = r;
}
void
aes192_key_expand (u8x16 * ek, const u8 * k)
{
u8x8 *rk = (u8x8 *) ek;
ek[0] = vld1q_u8 (k);
rk[2] = vld1_u8 (k + 16);
aes192_key_expand_round_neon (rk + 3, 0x01);
aes192_key_expand_round_neon (rk + 6, 0x02);
aes192_key_expand_round_neon (rk + 9, 0x04);
aes192_key_expand_round_neon (rk + 12, 0x08);
aes192_key_expand_round_neon (rk + 15, 0x10);
aes192_key_expand_round_neon (rk + 18, 0x20);
aes192_key_expand_round_neon (rk + 21, 0x40);
aes192_key_expand_round_neon (rk + 24, 0x80);
}
static inline void
aes256_key_expand_round_neon (u8x16 * rk, u32 rcon)
{
u8x16 r, t, z = { };
r = vqtbl1q_u8 (rk[-1], rcon ? aese_prep_mask1 : aese_prep_mask2);
r = vaeseq_u8 (r, z);
if (rcon)
r ^= (u8x16) vdupq_n_u32 (rcon);
r ^= rk[-2];
r ^= t = vextq_u8 (z, rk[-2], 12);
r ^= t = vextq_u8 (z, t, 12);
r ^= vextq_u8 (z, t, 12);
rk[0] = r;
}
void
aes256_key_expand (u8x16 * rk, const u8 * k)
{
rk[0] = vld1q_u8 (k);
rk[1] = vld1q_u8 (k + 16);
aes256_key_expand_round_neon (rk + 2, 0x01);
aes256_key_expand_round_neon (rk + 3, 0);
aes256_key_expand_round_neon (rk + 4, 0x02);
aes256_key_expand_round_neon (rk + 5, 0);
aes256_key_expand_round_neon (rk + 6, 0x04);
aes256_key_expand_round_neon (rk + 7, 0);
aes256_key_expand_round_neon (rk + 8, 0x08);
aes256_key_expand_round_neon (rk + 9, 0);
aes256_key_expand_round_neon (rk + 10, 0x10);
aes256_key_expand_round_neon (rk + 11, 0);
aes256_key_expand_round_neon (rk + 12, 0x20);
aes256_key_expand_round_neon (rk + 13, 0);
aes256_key_expand_round_neon (rk + 14, 0x40);
}
#endif
static_always_inline void
aes_key_expand (u8x16 * key_schedule, u8 * key, aes_key_size_t ks)
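
The NEON key-expansion trick above works because vaeseq_u8 with a zero key performs AddRoundKey(0), ShiftRows and SubBytes, and ShiftRows is a no-op when all four 32-bit words of the state are equal; the vqtbl1q_u8 shuffle broadcasts RotWord of the last word into every lane, so after AESE each word holds SubWord(RotWord(W[3])). Below is a minimal standalone check of one AES-128 expansion round against the FIPS-197 Appendix A.1 key schedule. It is not part of the patch; it assumes an aarch64 compiler with the crypto extension (e.g. -march=armv8-a+crypto) and re-derives the same computation with plain NEON intrinsics.

#include <stdio.h>
#include <string.h>
#include <arm_neon.h>

/* same shuffle as aese_prep_mask1 above: broadcast RotWord of the last
   32-bit word into all four words */
static const uint8_t prep_mask1[16] =
  { 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12 };

static uint8x16_t
expand_round (uint8x16_t last_round, uint32_t rcon)
{
  uint8x16_t z = vdupq_n_u8 (0), r, t;

  r = vqtbl1q_u8 (last_round, vld1q_u8 (prep_mask1));
  /* AESE with a zero key = AddRoundKey(0) + ShiftRows + SubBytes;
     ShiftRows changes nothing here because all four words are equal */
  r = vaeseq_u8 (r, z);
  r = veorq_u8 (r, vreinterpretq_u8_u32 (vdupq_n_u32 (rcon)));
  r = veorq_u8 (r, last_round);
  t = vextq_u8 (z, last_round, 12);	/* words shifted up by one, zeros in */
  r = veorq_u8 (r, t);
  t = vextq_u8 (z, t, 12);		/* shifted up by two words */
  r = veorq_u8 (r, t);
  t = vextq_u8 (z, t, 12);		/* shifted up by three words */
  r = veorq_u8 (r, t);
  return r;
}

int
main (void)
{
  static const uint8_t k0[16] = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2,
    0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
  };
  /* FIPS-197 Appendix A.1, round 1 key w[4..7] */
  static const uint8_t k1[16] = { 0xa0, 0xfa, 0xfe, 0x17, 0x88, 0x54, 0x2c,
    0xb1, 0x23, 0xa3, 0x39, 0x39, 0x2a, 0x6c, 0x76, 0x05
  };
  uint8_t out[16];

  vst1q_u8 (out, expand_round (vld1q_u8 (k0), 0x01));
  printf ("round 1 key: %s\n", memcmp (out, k1, 16) ? "MISMATCH" : "ok");
  return 0;
}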

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1,208 @@
/*
*------------------------------------------------------------------
* Copyright (c) 2020 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#ifdef __aarch64__
static_always_inline void
aes_cbc_dec (u8x16 * k, u8 * src, u8 * dst, u8 * iv, int count, int rounds)
{
u8x16 r0, r1, r2, r3, c0, c1, c2, c3, f;
f = vld1q_u8 (iv);
while (count >= 64)
{
c0 = r0 = vld1q_u8 (src);
c1 = r1 = vld1q_u8 (src + 16);
c2 = r2 = vld1q_u8 (src + 32);
c3 = r3 = vld1q_u8 (src + 48);
for (int i = 0; i < rounds - 1; i++)
{
r0 = vaesimcq_u8 (vaesdq_u8 (r0, k[i]));
r1 = vaesimcq_u8 (vaesdq_u8 (r1, k[i]));
r2 = vaesimcq_u8 (vaesdq_u8 (r2, k[i]));
r3 = vaesimcq_u8 (vaesdq_u8 (r3, k[i]));
}
r0 = vaesdq_u8 (r0, k[rounds - 1]) ^ k[rounds];
r1 = vaesdq_u8 (r1, k[rounds - 1]) ^ k[rounds];
r2 = vaesdq_u8 (r2, k[rounds - 1]) ^ k[rounds];
r3 = vaesdq_u8 (r3, k[rounds - 1]) ^ k[rounds];
vst1q_u8 (dst, r0 ^ f);
vst1q_u8 (dst + 16, r1 ^ c0);
vst1q_u8 (dst + 32, r2 ^ c1);
vst1q_u8 (dst + 48, r3 ^ c2);
f = c3;
src += 64;
dst += 64;
count -= 64;
}
while (count >= 16)
{
c0 = r0 = vld1q_u8 (src);
for (int i = 0; i < rounds - 1; i++)
r0 = vaesimcq_u8 (vaesdq_u8 (r0, k[i]));
r0 = vaesdq_u8 (r0, k[rounds - 1]) ^ k[rounds];
vst1q_u8 (dst, r0 ^ f);
f = c0;
src += 16;
dst += 16;
count -= 16;
}
}
static_always_inline u32
aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
u32 n_ops, aes_key_size_t ks)
{
crypto_native_main_t *cm = &crypto_native_main;
crypto_native_per_thread_data_t *ptd =
vec_elt_at_index (cm->per_thread_data, vm->thread_index);
int rounds = AES_KEY_ROUNDS (ks);
u8 dummy[8192];
u32 i, j, count, n_left = n_ops;
u32x4 dummy_mask = { };
u32x4 len = { };
vnet_crypto_key_index_t key_index[4];
u8 *src[4] = { };
u8 *dst[4] = { };
u8x16 r[4] = { };
u8x16 k[15][4] = { };
for (i = 0; i < 4; i++)
key_index[i] = ~0;
more:
for (i = 0; i < 4; i++)
if (len[i] == 0)
{
if (n_left == 0)
{
/* no more work to enqueue, so we are enqueueing dummy buffer */
src[i] = dst[i] = dummy;
len[i] = sizeof (dummy);
dummy_mask[i] = 0;
}
else
{
if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
{
r[i] = ptd->cbc_iv[i];
vst1q_u8 (ops[0]->iv, r[i]);
ptd->cbc_iv[i] = vaeseq_u8 (r[i], r[i]);
}
else
r[i] = vld1q_u8 (ops[0]->iv);
src[i] = ops[0]->src;
dst[i] = ops[0]->dst;
len[i] = ops[0]->len;
dummy_mask[i] = ~0;
if (key_index[i] != ops[0]->key_index)
{
aes_cbc_key_data_t *kd;
key_index[i] = ops[0]->key_index;
kd = (aes_cbc_key_data_t *) cm->key_data[key_index[i]];
for (j = 0; j < rounds + 1; j++)
k[j][i] = kd->encrypt_key[j];
}
ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
n_left--;
ops++;
}
}
count = u32x4_min_scalar (len);
ASSERT (count % 16 == 0);
for (i = 0; i < count; i += 16)
{
r[0] ^= vld1q_u8 (src[0] + i);
r[1] ^= vld1q_u8 (src[1] + i);
r[2] ^= vld1q_u8 (src[2] + i);
r[3] ^= vld1q_u8 (src[3] + i);
for (j = 0; j < rounds - 1; j++)
{
r[0] = vaesmcq_u8 (vaeseq_u8 (r[0], k[j][0]));
r[1] = vaesmcq_u8 (vaeseq_u8 (r[1], k[j][1]));
r[2] = vaesmcq_u8 (vaeseq_u8 (r[2], k[j][2]));
r[3] = vaesmcq_u8 (vaeseq_u8 (r[3], k[j][3]));
}
r[0] = vaeseq_u8 (r[0], k[j][0]) ^ k[rounds][0];
r[1] = vaeseq_u8 (r[1], k[j][1]) ^ k[rounds][1];
r[2] = vaeseq_u8 (r[2], k[j][2]) ^ k[rounds][2];
r[3] = vaeseq_u8 (r[3], k[j][3]) ^ k[rounds][3];
vst1q_u8 (dst[0] + i, r[0]);
vst1q_u8 (dst[1] + i, r[1]);
vst1q_u8 (dst[2] + i, r[2]);
vst1q_u8 (dst[3] + i, r[3]);
}
for (i = 0; i < 4; i++)
{
src[i] += count;
dst[i] += count;
len[i] -= count;
}
if (n_left > 0)
goto more;
if (!u32x4_is_all_zero (len & dummy_mask))
goto more;
return n_ops;
}
static_always_inline u32
aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
u32 n_ops, aes_key_size_t ks)
{
crypto_native_main_t *cm = &crypto_native_main;
int rounds = AES_KEY_ROUNDS (ks);
vnet_crypto_op_t *op = ops[0];
aes_cbc_key_data_t *kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
u32 n_left = n_ops;
ASSERT (n_ops >= 1);
decrypt:
aes_cbc_dec (kd->decrypt_key, op->src, op->dst, op->iv, op->len, rounds);
op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
if (--n_left)
{
op += 1;
kd = (aes_cbc_key_data_t *) cm->key_data[op->key_index];
goto decrypt;
}
return n_ops;
}
#endif
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
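
For reference, the shape of the two data paths above follows directly from the CBC equations. Decryption computes P[i] = D_K(C[i]) xor C[i-1], so every block depends only on ciphertext already in hand and aes_cbc_dec can keep four blocks in flight per iteration. Encryption computes C[i] = E_K(P[i] xor C[i-1]) and cannot start block i+1 before C[i] exists, which is why aesni_ops_enc_aes_cbc interleaves four independent operations and tops up idle lanes with a dummy buffer. A scalar reference sketch of both loops (hypothetical, not part of the patch; block_fn stands in for the AES round chains):

#include <stdint.h>
#include <string.h>

/* stand-in for one AES block operation (the code above does this with the
   vaesdq/vaesimcq or vaeseq/vaesmcq round chains) */
typedef void (*block_fn) (const void *key, const uint8_t in[16],
			  uint8_t out[16]);

/* CBC decrypt: P[i] = D(C[i]) ^ C[i-1].  Every output depends only on
   ciphertext already available, so blocks can be processed in parallel. */
void
cbc_decrypt_ref (block_fn decrypt, const void *key, const uint8_t * src,
		 uint8_t * dst, const uint8_t iv[16], int count)
{
  uint8_t f[16], t[16];
  memcpy (f, iv, 16);
  for (int off = 0; off + 16 <= count; off += 16)
    {
      decrypt (key, src + off, t);
      for (int j = 0; j < 16; j++)
	dst[off + j] = t[j] ^ f[j];
      memcpy (f, src + off, 16);	/* chain on the ciphertext */
    }
}

/* CBC encrypt: C[i] = E(P[i] ^ C[i-1]).  Block i+1 cannot start before
   C[i] exists, so a single buffer is strictly serial. */
void
cbc_encrypt_ref (block_fn encrypt, const void *key, const uint8_t * src,
		 uint8_t * dst, const uint8_t iv[16], int count)
{
  uint8_t f[16], t[16];
  memcpy (f, iv, 16);
  for (int off = 0; off + 16 <= count; off += 16)
    {
      for (int j = 0; j < 16; j++)
	t[j] = src[off + j] ^ f[j];
      encrypt (key, t, dst + off);
      memcpy (f, dst + off, 16);	/* chain on our own output */
    }
}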

@@ -39,6 +39,7 @@ clib_error_t *crypto_native_aes_cbc_init_sse42 (vlib_main_t * vm);
 clib_error_t *crypto_native_aes_cbc_init_avx2 (vlib_main_t * vm);
 clib_error_t *crypto_native_aes_cbc_init_avx512 (vlib_main_t * vm);
 clib_error_t *crypto_native_aes_cbc_init_vaes (vlib_main_t * vm);
+clib_error_t *crypto_native_aes_cbc_init_neon (vlib_main_t * vm);
 
 clib_error_t *crypto_native_aes_gcm_init_sse42 (vlib_main_t * vm);
 clib_error_t *crypto_native_aes_gcm_init_avx2 (vlib_main_t * vm);

@@ -62,7 +62,8 @@ crypto_native_init (vlib_main_t * vm)
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   clib_error_t *error = 0;
 
-  if (clib_cpu_supports_x86_aes () == 0)
+  if (clib_cpu_supports_x86_aes () == 0 &&
+      clib_cpu_supports_aarch64_aes () == 0)
     return 0;
 
   vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
@@ -72,6 +73,7 @@ crypto_native_init (vlib_main_t * vm)
   vnet_crypto_register_engine (vm, "native", 100,
                                "Native ISA Optimized Crypto");
 
+#if __x86_64__
   if (clib_cpu_supports_vaes ())
     error = crypto_native_aes_cbc_init_vaes (vm);
   else if (clib_cpu_supports_avx512f ())
@@ -98,6 +100,13 @@ crypto_native_init (vlib_main_t * vm)
       if (error)
         goto error;
     }
+#endif
+
+#if __aarch64__
+  error = crypto_native_aes_cbc_init_neon (vm);
+  if (error)
+    goto error;
+#endif
 
   vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                     crypto_native_key_handler);
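
clib_cpu_supports_aarch64_aes () gates the new code path on the ARMv8 AES instructions actually being present. On Linux/arm64 that capability is advertised to user space through the HWCAP auxiliary vector; the snippet below is a standalone illustration of that check (an assumption for illustration only, not how the VPP detection code is written):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int
main (void)
{
  unsigned long hwcap = getauxval (AT_HWCAP);

  /* HWCAP_AES is set by the kernel when the ARMv8 Crypto Extension
     AES instructions (AESE/AESD/AESMC/AESIMC) are usable */
  printf ("ARMv8 AES instructions: %s\n",
	  (hwcap & HWCAP_AES) ? "available" : "not available");
  return 0;
}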

@@ -176,6 +176,12 @@ u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
   *(u32 *) p3 = vgetq_lane_u32 (r, 3);
 }
 
+static_always_inline u32
+u32x4_min_scalar (u32x4 v)
+{
+  return vminvq_u32 (v);
+}
+
 #define CLIB_HAVE_VEC128_MSB_MASK
 
 #define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
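
u32x4_min_scalar () is the vppinfra helper the four-lane encrypt loop uses (count = u32x4_min_scalar (len)) to find how many bytes can be processed before the shortest lane runs dry; on ARMv8 it maps to a single horizontal-minimum instruction. A tiny usage sketch (hypothetical, not part of the patch), calling the underlying intrinsic directly:

#include <stdio.h>
#include <arm_neon.h>

int
main (void)
{
  /* remaining bytes in the four CBC lanes */
  uint32x4_t len = { 192, 64, 256, 128 };

  /* vminvq_u32 = horizontal unsigned minimum across the vector */
  printf ("process %u bytes this round\n", vminvq_u32 (len));
  return 0;
}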