crypto-native: rename crypto_ia32 to crypto_native

Type: refactor

Change-Id: I9f21b3bf669ff913ff50afe5459cf52ff987e701
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2020-01-28 09:55:25 +01:00
committed by Damjan Marion
parent 0d4a61216c
commit 7d08e39a87
9 changed files with 96 additions and 96 deletions

View File

@@ -0,0 +1,37 @@
# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This plugin uses x86 AES-NI / CLMUL intrinsics; skip configuring it
# entirely on non-x86_64 targets.
if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
return()
endif()
# Main plugin target; per-ISA variant objects are attached to it below.
add_vpp_plugin(crypto_native SOURCES main.c)
# Each VARIANT entry is "<name>;<march flag>"; the escaped \; keeps the
# pair together as a single list element until it is split in the loop.
list(APPEND VARIANTS "sse42\;-march=silvermont")
list(APPEND VARIANTS "avx2\;-march=core-avx2")
# Newer -march values are added only when the toolchain supports them
# (compiler_flag_march_* are probed elsewhere in the build system).
if(compiler_flag_march_skylake_avx512)
list(APPEND VARIANTS "avx512\;-march=skylake-avx512")
endif()
if(compiler_flag_march_icelake_client)
list(APPEND VARIANTS "vaesni\;-march=icelake-client")
endif()
# Build one object library per ISA variant: the same AES sources are
# compiled repeatedly with different -march flags, and the resulting
# objects are folded into the plugin for runtime dispatch.
foreach(VARIANT IN LISTS VARIANTS)
  # VARIANT is a 2-element list: "<name>;<march flag>"
  list(GET VARIANT 0 v)
  list(GET VARIANT 1 f)
  set(l crypto_native_${v})
  add_library(${l} OBJECT aes_cbc.c aes_gcm.c)
  set_target_properties(${l} PROPERTIES POSITION_INDEPENDENT_CODE ON)
  # PRIVATE: the per-variant -march flags are build requirements of these
  # objects only; they must not leak as usage requirements, since the
  # objects are consumed solely through $<TARGET_OBJECTS:...>.
  target_compile_options(${l} PRIVATE ${f} -Wall -fno-common -maes)
  target_sources(crypto_native_plugin PRIVATE $<TARGET_OBJECTS:${l}>)
endforeach()

View File

@@ -0,0 +1,10 @@
---
name: IPSec crypto engine provided by native implementation
maintainer: Damjan Marion <damarion@cisco.com>
features:
- CBC(128, 192, 256)
- GCM(128, 192, 256)
description: "An implementation of a native crypto-engine"
state: production
properties: [API, CLI, MULTITHREAD]

View File

@@ -0,0 +1,226 @@
/*
*------------------------------------------------------------------
* Copyright (c) 2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#ifndef __aesni_h__
#define __aesni_h__
/* AES key-size selector; the numeric value is used arithmetically by the
   ROUNDS/BYTES macros below. */
typedef enum
{
AES_KEY_128 = 0,
AES_KEY_192 = 1,
AES_KEY_256 = 2,
} aes_key_size_t;
/* number of cipher rounds: 10 / 12 / 14 for 128 / 192 / 256-bit keys */
#define AES_KEY_ROUNDS(x) (10 + x * 2)
/* raw key length in bytes: 16 / 24 / 32 */
#define AES_KEY_BYTES(x) (16 + x * 8)
/* AES-NI based AES key expansion based on code samples from
Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper
(323641-001) */
/* One AES-128 key-schedule step: fold the previous round key into itself
   with three 4-byte shifts, then XOR in lane 3 of the keygen-assist word
   broadcast across all lanes. */
static_always_inline __m128i
aes128_key_assist (__m128i r1, __m128i r2)
{
  __m128i t = r1;

  t ^= _mm_slli_si128 (t, 4);
  t ^= _mm_slli_si128 (t, 4);
  t ^= _mm_slli_si128 (t, 4);

  return t ^ _mm_shuffle_epi32 (r2, 0xff);
}
/* AES-128 key schedule: k[0..10] round keys from a 16-byte key. The rcon
   argument of _mm_aeskeygenassist_si128 must be a compile-time immediate,
   hence the fully unrolled sequence (0x01, 0x02, ... 0x1b, 0x36). */
static_always_inline void
aes128_key_expand (__m128i * k, u8 * key)
{
k[0] = _mm_loadu_si128 ((const __m128i *) key);
k[1] = aes128_key_assist (k[0], _mm_aeskeygenassist_si128 (k[0], 0x01));
k[2] = aes128_key_assist (k[1], _mm_aeskeygenassist_si128 (k[1], 0x02));
k[3] = aes128_key_assist (k[2], _mm_aeskeygenassist_si128 (k[2], 0x04));
k[4] = aes128_key_assist (k[3], _mm_aeskeygenassist_si128 (k[3], 0x08));
k[5] = aes128_key_assist (k[4], _mm_aeskeygenassist_si128 (k[4], 0x10));
k[6] = aes128_key_assist (k[5], _mm_aeskeygenassist_si128 (k[5], 0x20));
k[7] = aes128_key_assist (k[6], _mm_aeskeygenassist_si128 (k[6], 0x40));
k[8] = aes128_key_assist (k[7], _mm_aeskeygenassist_si128 (k[7], 0x80));
k[9] = aes128_key_assist (k[8], _mm_aeskeygenassist_si128 (k[8], 0x1b));
k[10] = aes128_key_assist (k[9], _mm_aeskeygenassist_si128 (k[9], 0x36));
}
/* Assist step shared by the AES-192 schedule. r1 accumulates the next
   128 bits of schedule material, r2 carries the keygen-assist word in and
   the broadcast of r1's top lane out (side effect reused by the caller),
   r3 holds the 64-bit schedule tail. Statement order is significant:
   the chained assignments reuse each shifted intermediate. */
static_always_inline void
aes192_key_assist (__m128i * r1, __m128i * r2, __m128i * r3)
{
__m128i r;
/* XOR r1 with three successive 4-byte left shifts of itself */
*r1 ^= r = _mm_slli_si128 (*r1, 0x4);
*r1 ^= r = _mm_slli_si128 (r, 0x4);
*r1 ^= _mm_slli_si128 (r, 0x4);
/* mix in lane 1 of the keygen-assist word */
*r1 ^= _mm_shuffle_epi32 (*r2, 0x55);
*r3 ^= _mm_slli_si128 (*r3, 0x4);
/* r2 is overwritten here with the lane-3 broadcast of the new r1 */
*r3 ^= *r2 = _mm_shuffle_epi32 (*r1, 0xff);
}
/* AES-192 key schedule: 12 rounds, k[0..12]. Because the cipher key is
   192-bit while round keys are 128-bit, consecutive schedule words
   straddle the r1/r3 temporaries; _mm_shuffle_pd stitches 64-bit halves
   into the k[i] slots (some k[i] are written twice on purpose). */
static_always_inline void
aes192_key_expand (__m128i * k, u8 * key)
{
__m128i r1, r2, r3;
k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
/* load the 24-bytes key as 2 * 16-bytes (and ignore last 8-bytes) */
r3 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) (key + 16));
k[1] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x1);
aes192_key_assist (&r1, &r2, &r3);
/* merge: low half of k[1] keeps the loaded tail, high half from r1 */
k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
r2 = _mm_aeskeygenassist_si128 (r3, 0x2);
aes192_key_assist (&r1, &r2, &r3);
k[3] = r1;
k[4] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x4);
aes192_key_assist (&r1, &r2, &r3);
k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
r2 = _mm_aeskeygenassist_si128 (r3, 0x8);
aes192_key_assist (&r1, &r2, &r3);
k[6] = r1;
k[7] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
aes192_key_assist (&r1, &r2, &r3);
k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
aes192_key_assist (&r1, &r2, &r3);
k[9] = r1;
k[10] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
aes192_key_assist (&r1, &r2, &r3);
k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
r2 = _mm_aeskeygenassist_si128 (r3, 0x80);
aes192_key_assist (&r1, &r2, &r3);
k[12] = r1;
}
/* First half of an AES-256 schedule round: fold three byte-shifted
   copies of the even round key into itself, broadcast lane 3 of the
   keygen-assist word back through r2, and XOR it in. */
static_always_inline void
aes256_key_assist1 (__m128i * r1, __m128i * r2)
{
  __m128i s = _mm_slli_si128 (*r1, 0x4);

  *r1 ^= s;
  s = _mm_slli_si128 (s, 0x4);
  *r1 ^= s;
  *r1 ^= _mm_slli_si128 (s, 0x4);
  *r2 = _mm_shuffle_epi32 (*r2, 0xff);
  *r1 ^= *r2;
}
/* Second half of an AES-256 schedule round: same shift/fold pattern on
   the odd round key, mixing in lane 2 of a keygen-assist word derived
   from the freshly computed even key (rcon 0). */
static_always_inline void
aes256_key_assist2 (__m128i r1, __m128i * r3)
{
  __m128i s = _mm_slli_si128 (*r3, 0x4);

  *r3 ^= s;
  s = _mm_slli_si128 (s, 0x4);
  *r3 ^= s;
  *r3 ^= _mm_slli_si128 (s, 0x4);
  *r3 ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (r1, 0x0), 0xaa);
}
/* AES-256 key schedule: 14 rounds, k[0..14]. k[0]/k[1] come straight
   from the 32-byte key; each subsequent pair is produced by assist1
   (even keys, with an unrolled immediate rcon) and assist2 (odd keys).
   The last round only needs the even key. */
static_always_inline void
aes256_key_expand (__m128i * k, u8 * key)
{
__m128i r1, r2, r3;
k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
k[1] = r3 = _mm_loadu_si128 ((__m128i *) (key + 16));
r2 = _mm_aeskeygenassist_si128 (k[1], 0x01);
aes256_key_assist1 (&r1, &r2);
k[2] = r1;
aes256_key_assist2 (r1, &r3);
k[3] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x02);
aes256_key_assist1 (&r1, &r2);
k[4] = r1;
aes256_key_assist2 (r1, &r3);
k[5] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x04);
aes256_key_assist1 (&r1, &r2);
k[6] = r1;
aes256_key_assist2 (r1, &r3);
k[7] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x08);
aes256_key_assist1 (&r1, &r2);
k[8] = r1;
aes256_key_assist2 (r1, &r3);
k[9] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
aes256_key_assist1 (&r1, &r2);
k[10] = r1;
aes256_key_assist2 (r1, &r3);
k[11] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
aes256_key_assist1 (&r1, &r2);
k[12] = r1;
aes256_key_assist2 (r1, &r3);
k[13] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
aes256_key_assist1 (&r1, &r2);
k[14] = r1;
}
/* Expand raw key bytes into the full round-key schedule for the given
   key size. k must have room for AES_KEY_ROUNDS (ks) + 1 entries. */
static_always_inline void
aes_key_expand (__m128i * k, u8 * key, aes_key_size_t ks)
{
  if (ks == AES_KEY_128)
    aes128_key_expand (k, key);
  else if (ks == AES_KEY_192)
    aes192_key_expand (k, key);
  else if (ks == AES_KEY_256)
    aes256_key_expand (k, key);
}
/* Convert an encryption key schedule into the equivalent decryption
   schedule in place: reverse the round-key order and run AESIMC
   (InvMixColumns) on every key except the first and last. */
static_always_inline void
aes_key_enc_to_dec (__m128i * k, aes_key_size_t ks)
{
  const int n_rounds = AES_KEY_ROUNDS (ks);
  __m128i tmp;

  /* endpoints are swapped but not transformed */
  tmp = k[0];
  k[0] = k[n_rounds];
  k[n_rounds] = tmp;

  /* walk inward from both ends, swapping and applying AESIMC */
  for (int lo = 1, hi = n_rounds - 1; lo < hi; lo++, hi--)
    {
      tmp = _mm_aesimc_si128 (k[lo]);
      k[lo] = _mm_aesimc_si128 (k[hi]);
      k[hi] = tmp;
    }

  /* the middle key has no partner but still needs AESIMC */
  k[n_rounds / 2] = _mm_aesimc_si128 (k[n_rounds / 2]);
}
#endif /* __aesni_h__ */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,55 @@
/*
*------------------------------------------------------------------
* Copyright (c) 2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#ifndef __crypto_native_h__
#define __crypto_native_h__
/* per-algorithm key expansion hook: converts a vnet crypto key into
   engine-private key material; the returned pointer is stored in
   crypto_native_main.key_data */
typedef void *(crypto_native_key_fn_t) (vnet_crypto_key_t * key);
typedef struct
{
/* cbc_iv: presumably per-thread IV scratch for the CBC path —
   TODO confirm against aes_cbc.c */
__m128i cbc_iv[4];
} crypto_native_per_thread_data_t;
typedef struct
{
/* engine index returned by vnet_crypto_register_engine () */
u32 crypto_engine_index;
crypto_native_per_thread_data_t *per_thread_data;
/* one key-expansion function per algorithm; 0 when unsupported */
crypto_native_key_fn_t *key_fn[VNET_CRYPTO_N_ALGS];
/* expanded key material, indexed by vnet key index */
void **key_data;
} crypto_native_main_t;
extern crypto_native_main_t crypto_native_main;
/* per-ISA initializers; each is defined in a variant object built with
   the matching -march flags (see plugin CMakeLists.txt) */
clib_error_t *crypto_native_aes_cbc_init_sse42 (vlib_main_t * vm);
clib_error_t *crypto_native_aes_cbc_init_avx2 (vlib_main_t * vm);
clib_error_t *crypto_native_aes_cbc_init_avx512 (vlib_main_t * vm);
clib_error_t *crypto_native_aes_cbc_init_vaes (vlib_main_t * vm);
clib_error_t *crypto_native_aes_gcm_init_sse42 (vlib_main_t * vm);
clib_error_t *crypto_native_aes_gcm_init_avx2 (vlib_main_t * vm);
clib_error_t *crypto_native_aes_gcm_init_avx512 (vlib_main_t * vm);
clib_error_t *crypto_native_aes_gcm_init_vaes (vlib_main_t * vm);
#endif /* __crypto_native_h__ */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/

View File

@@ -0,0 +1,253 @@
/*
*------------------------------------------------------------------
* Copyright (c) 2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
/*
*------------------------------------------------------------------
* Copyright(c) 2018, Intel Corporation All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*------------------------------------------------------------------
*/
/*
* Based on work by: Shay Gueron, Michael E. Kounavis, Erdinc Ozturk,
* Vinodh Gopal, James Guilford, Tomasz Kantecki
*
* References:
* [1] Vinodh Gopal et. al. Optimized Galois-Counter-Mode Implementation on
* Intel Architecture Processors. August, 2010
* [2] Erdinc Ozturk et. al. Enabling High-Performance Galois-Counter-Mode on
* Intel Architecture Processors. October, 2012.
* [3] intel-ipsec-mb library, https://github.com/01org/intel-ipsec-mb.git
*
* Definitions:
* GF Galois Extension Field GF(2^128) - finite field where elements are
* represented as polynomials with coefficients in GF(2) with the
* highest degree of 127. Polynomials are represented as 128-bit binary
* numbers where each bit represents one coefficient.
* e.g. polynomial x^5 + x^3 + x + 1 is represented in binary 101011.
* H hash key (128 bit)
* POLY irreducible polynomial x^127 + x^7 + x^2 + x + 1
* RPOLY irreducible polynomial x^128 + x^127 + x^126 + x^121 + 1
* + addition in GF, which equals to XOR operation
* * multiplication in GF
*
* GF multiplication consists of 2 steps:
* - carry-less multiplication of two 128-bit operands into 256-bit result
* - reduction of 256-bit result into 128-bit with modulo POLY
*
* GHash is calculated on 128-bit blocks of data according to the following
* formula:
* GH = (GH + data) * hash_key
*
 * To avoid bit-reflection of data, this code uses GF multiplication
* with reversed polynomial:
* a * b * x^-127 mod RPOLY
*
* To improve computation speed table Hi is precomputed with powers of H',
* where H' is calculated as H<<1 mod RPOLY.
* This allows us to improve performance by deferring reduction. For example
 * to calculate ghash of 4 128-bit blocks of data (b0, b1, b2, b3), we can do:
*
* __i128 Hi[4];
* ghash_precompute (H, Hi, 4);
*
* ghash_data_t _gd, *gd = &_gd;
* ghash_mul_first (gd, GH ^ b0, Hi[3]);
* ghash_mul_next (gd, b1, Hi[2]);
* ghash_mul_next (gd, b2, Hi[1]);
* ghash_mul_next (gd, b3, Hi[0]);
* ghash_reduce (gd);
* ghash_reduce2 (gd);
* GH = ghash_final (gd);
*
* Reduction step is split into 3 functions so it can be better interleaved
* with other code, (i.e. with AES computation).
*/
#ifndef __ghash_h__
#define __ghash_h__
/* 3-way XOR (a ^ b ^ c). On AVX-512 capable builds a single ternary-logic
   instruction (truth table 0x96 == a ^ b ^ c) saves a clock cycle over
   two XORs. Using #else (rather than falling through) avoids leaving an
   unreachable return statement when __AVX512F__ is defined. */
static_always_inline __m128i
ghash_xor3 (__m128i a, __m128i b, __m128i c)
{
#if defined (__AVX512F__)
  return _mm_ternarylogic_epi32 (a, b, c, 0x96);
#else
  return a ^ b ^ c;
#endif
}
/* Accumulator state for a deferred-reduction GHASH computation
   (see the usage sketch in the file-header comment). */
typedef struct
{
__m128i mid, hi, lo, tmp_lo, tmp_hi;
/* nonzero when one hi/lo partial-product pair is buffered in
   tmp_hi/tmp_lo awaiting a 3-way XOR fold */
int pending;
} ghash_data_t;
/* reversed polynomial constant used for the final modular reduction
   (POLY from the header comment, bit-reflected representation) */
static const __m128i ghash_poly = { 1, 0xC200000000000000 };
/* shifted variant of the polynomial consumed 64 bits at a time by the
   split reduction in ghash_reduce / ghash_reduce2 */
static const __m128i ghash_poly2 = { 0x1C2000000, 0xC200000000000000 };
/* Start a multi-block GHASH: compute the four carry-less partial
   products of a * b and initialize the accumulator state. */
static_always_inline void
ghash_mul_first (ghash_data_t * gd, __m128i a, __m128i b)
{
  __m128i cross1 = _mm_clmulepi64_si128 (a, b, 0x01);	/* a0 * b1 */
  __m128i cross2 = _mm_clmulepi64_si128 (a, b, 0x10);	/* a1 * b0 */

  gd->hi = _mm_clmulepi64_si128 (a, b, 0x11);	/* a1 * b1 */
  gd->lo = _mm_clmulepi64_si128 (a, b, 0x00);	/* a0 * b0 */
  gd->mid = cross1 ^ cross2;

  /* nothing buffered yet in tmp_lo / tmp_hi for ghash_mul_next () */
  gd->pending = 0;
}
/* Accumulate one more block into a running GHASH. hi/lo partial products
   are folded in pairs: every other call buffers them in tmp_hi/tmp_lo so
   the following call can combine both with a single 3-way XOR. */
static_always_inline void
ghash_mul_next (ghash_data_t * gd, __m128i a, __m128i b)
{
  __m128i prod_hi = _mm_clmulepi64_si128 (a, b, 0x11);	/* a1 * b1 */
  __m128i prod_lo = _mm_clmulepi64_si128 (a, b, 0x00);	/* a0 * b0 */

  /* this branch is resolved at compile time after inlining, so it costs
     nothing at runtime while reducing the XOR count via ternary logic */
  if (gd->pending == 0)
    {
      /* nothing buffered from the previous call — postpone the XOR */
      gd->tmp_hi = prod_hi;
      gd->tmp_lo = prod_lo;
      gd->pending = 1;
    }
  else
    {
      /* fold the buffered pair together with this block's products */
      gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, prod_hi);
      gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, prod_lo);
      gd->pending = 0;
    }

  /* middle terms (a0 * b1 ^ a1 * b0) are always folded immediately */
  gd->mid = ghash_xor3 (gd->mid,
			_mm_clmulepi64_si128 (a, b, 0x01),
			_mm_clmulepi64_si128 (a, b, 0x10));
}
/* First stage of the deferred reduction: fold the middle partial product
   into hi/lo (flushing any still-buffered pair from ghash_mul_next),
   then start reducing the low 128 bits against the shifted polynomial. */
static_always_inline void
ghash_reduce (ghash_data_t * gd)
{
__m128i r;
/* Final combination:
gd->lo ^= gd->mid << 64
gd->hi ^= gd->mid >> 64 */
__m128i midl = _mm_slli_si128 (gd->mid, 8);
__m128i midr = _mm_srli_si128 (gd->mid, 8);
if (gd->pending)
{
/* a hi/lo pair is still buffered from the last ghash_mul_next () */
gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, midl);
gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, midr);
}
else
{
gd->lo ^= midl;
gd->hi ^= midr;
}
/* first folding step of the low half with the shifted polynomial */
r = _mm_clmulepi64_si128 (ghash_poly2, gd->lo, 0x01);
gd->lo ^= _mm_slli_si128 (r, 8);
}
/* Second reduction stage: multiply the folded low half by both 64-bit
   halves of the shifted polynomial; results meet in ghash_final (). */
static_always_inline void
ghash_reduce2 (ghash_data_t * gd)
{
  __m128i lo = gd->lo;

  gd->tmp_hi = _mm_clmulepi64_si128 (ghash_poly2, lo, 0x10);
  gd->tmp_lo = _mm_clmulepi64_si128 (ghash_poly2, lo, 0x00);
}
/* Last reduction stage: shift the two stage-two products into place and
   combine them with the high half to produce the 128-bit GHASH result. */
static_always_inline __m128i
ghash_final (ghash_data_t * gd)
{
  __m128i lo_part = _mm_srli_si128 (gd->tmp_lo, 4);
  __m128i hi_part = _mm_slli_si128 (gd->tmp_hi, 4);

  return ghash_xor3 (gd->hi, lo_part, hi_part);
}
/* Single-block GF(2^128) multiply: a * b with full reduction, built from
   the staged primitives above. */
static_always_inline __m128i
ghash_mul (__m128i a, __m128i b)
{
  ghash_data_t gd_storage;
  ghash_data_t *gd = &gd_storage;

  ghash_mul_first (gd, a, b);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
/* Precompute count powers of the hash key for deferred-reduction GHASH:
   Hi[0] = H' = H<<1 mod RPOLY, Hi[i] = H' * Hi[i-1]. The shift of the
   128-bit H is assembled from two 64-bit shifts, and the conditional
   polynomial XOR (when the carried-out top bit is set) is done branchless
   via a compare mask. */
static_always_inline void
ghash_precompute (__m128i H, __m128i * Hi, int count)
{
__m128i r;
/* calculate H<<1 mod poly from the hash key */
r = _mm_srli_epi64 (H, 63);
H = _mm_slli_epi64 (H, 1);
H |= _mm_slli_si128 (r, 8);
r = _mm_srli_si128 (r, 8);
r = _mm_shuffle_epi32 (r, 0x24);
/* *INDENT-OFF* */
/* all-ones mask iff the original top bit of H carried out */
r = _mm_cmpeq_epi32 (r, (__m128i) (u32x4) {1, 0, 0, 1});
/* *INDENT-ON* */
Hi[0] = H ^ (r & ghash_poly);
/* calculate H^(i + 1) */
for (int i = 1; i < count; i++)
Hi[i] = ghash_mul (Hi[0], Hi[i - 1]);
}
#endif /* __ghash_h__ */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/

View File

@@ -0,0 +1,135 @@
/*
*------------------------------------------------------------------
* Copyright (c) 2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
/* global engine state shared by all ISA variants and worker threads */
crypto_native_main_t crypto_native_main;
/* Key lifecycle callback registered with the vnet crypto layer.
   ADD/MODIFY expand the key via the per-algorithm key_fn and cache the
   result in cm->key_data[idx]; DEL securely frees the cached material.
   Keys for algorithms without a key_fn are ignored entirely. */
static void
crypto_native_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
			   vnet_crypto_key_index_t idx)
{
  crypto_native_main_t *cm = &crypto_native_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  if (cm->key_fn[key->alg] == 0)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      /* free only if this slot was ever populated */
      if (idx < vec_len (cm->key_data) && cm->key_data[idx] != 0)
	{
	  clib_mem_free_s (cm->key_data[idx]);
	  cm->key_data[idx] = 0;
	}
      return;
    }

  /* make sure the vector covers this key index */
  vec_validate_aligned (cm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  /* a MODIFY replaces previously expanded key material */
  if (cm->key_data[idx] && kop == VNET_CRYPTO_KEY_OP_MODIFY)
    clib_mem_free_s (cm->key_data[idx]);

  cm->key_data[idx] = cm->key_fn[key->alg] (key);
}
/*
 * Plugin init: registers the "native" crypto engine (priority 100) and
 * installs the widest available ISA variant of the AES-CBC and AES-GCM
 * handlers based on runtime CPU feature detection.
 */
clib_error_t *
crypto_native_init (vlib_main_t * vm)
{
crypto_native_main_t *cm = &crypto_native_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
clib_error_t *error = 0;
/* AES-NI is the baseline requirement for every variant; quietly stay
unregistered without it */
if (clib_cpu_supports_x86_aes () == 0)
return 0;
vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
cm->crypto_engine_index =
vnet_crypto_register_engine (vm, "native", 100,
"Native ISA Optimized Crypto");
/* CBC: exactly one variant is initialized, best ISA first */
if (clib_cpu_supports_vaes ())
error = crypto_native_aes_cbc_init_vaes (vm);
else if (clib_cpu_supports_avx512f ())
error = crypto_native_aes_cbc_init_avx512 (vm);
else if (clib_cpu_supports_avx2 ())
error = crypto_native_aes_cbc_init_avx2 (vm);
else
error = crypto_native_aes_cbc_init_sse42 (vm);
if (error)
goto error;
/* GCM additionally requires carry-less multiply for GHASH */
if (clib_cpu_supports_pclmulqdq ())
{
if (clib_cpu_supports_vaes ())
error = crypto_native_aes_gcm_init_vaes (vm);
else if (clib_cpu_supports_avx512f ())
error = crypto_native_aes_gcm_init_avx512 (vm);
else if (clib_cpu_supports_avx2 ())
error = crypto_native_aes_gcm_init_avx2 (vm);
else
error = crypto_native_aes_gcm_init_sse42 (vm);
if (error)
goto error;
}
vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
crypto_native_key_handler);
/* fallthrough: error is 0 on success */
error:
if (error)
vec_free (cm->per_thread_data);
return error;
}
/* *INDENT-OFF* */
/* run after the core crypto layer so engine registration can succeed */
VLIB_INIT_FUNCTION (crypto_native_init) =
{
.runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */
#include <vpp/app/version.h>
/* *INDENT-OFF* */
/* NOTE(review): the user-visible description still says "IA32" although
   this commit renames the plugin to crypto_native — consider updating
   the string in a follow-up */
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
.description = "Intel IA32 Software Crypto Engine",
};
/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/