ipsec: fix AES CBC IV generation (CVE-2022-46397)

For AES-CBC, the IV must be unpredictable (see NIST SP800-38a Appendix
C). Chaining IVs, as done by the ipsecmb and native backends for
VNET_CRYPTO_OP_FLAG_INIT_IV, is fully predictable.
Encrypt a counter as part of the message, making the (predictable)
counter-generated IV unpredictable.

Fixes: VPP-2037
Type: fix

Change-Id: If4f192d62bf97dda553e7573331c75efa11822ae
Signed-off-by: Benoît Ganne <bganne@cisco.com>
Author:    Benoît Ganne <bganne@cisco.com>
Date:      2022-01-18 15:56:41 +01:00
Committer: Dave Wallace
Parent:    73fdb095b3
Commit:    bf73623ecc

3 changed files with 65 additions and 29 deletions
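Why predictability matters for CBC: if an attacker can predict the IV that
will be used for the next packet, a single chosen plaintext lets them confirm
a guess about an earlier secret block. The following standalone sketch
demonstrates this; it is not VPP code, and it assumes OpenSSL's EVP API, a
made-up demo key, and two fixed "predictable" IVs standing in for chained
ones (compile with -lcrypto):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

/* one-block AES-128: the E() in CBC, where C_i = E (P_i ^ IV) */
static void
aes_block (const uint8_t key[16], const uint8_t in[16], uint8_t out[16])
{
  int n;
  EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new ();
  EVP_EncryptInit_ex (ctx, EVP_aes_128_ecb (), NULL, key, NULL);
  EVP_CIPHER_CTX_set_padding (ctx, 0);
  EVP_EncryptUpdate (ctx, out, &n, in, 16);
  EVP_CIPHER_CTX_free (ctx);
}

int
main (void)
{
  uint8_t key[16] = "demo-key-123456";	  /* secret from the attacker */
  uint8_t iv1[16] = { 0 }, iv2[16] = { 1 }; /* both known in advance */
  uint8_t secret[16] = "top-secret-block";
  uint8_t x[16], c1[16], c2[16];
  int i;

  /* victim encrypts a secret block under IV1: c1 = E (secret ^ IV1) */
  for (i = 0; i < 16; i++)
    x[i] = secret[i] ^ iv1[i];
  aes_block (key, x, c1);

  /* attacker, knowing IV1 and predicting IV2, submits guess ^ IV1 ^ IV2;
   * the cipher computes E (guess ^ IV1), so c2 == c1 iff guess == secret */
  uint8_t guess[16] = "top-secret-block";
  for (i = 0; i < 16; i++)
    x[i] = guess[i] ^ iv1[i] ^ iv2[i];
  aes_block (key, x, c2);

  printf ("guess %s\n", memcmp (c1, c2, 16) ? "wrong" : "confirmed");
  return 0;
}

Because C2 = E (P2 ^ IV2) and the attacker chose P2 = guess ^ IV1 ^ IV2, the
two ciphertext blocks match exactly when the guess equals the secret block,
which is why SP800-38a requires unpredictable IVs for CBC.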

--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h

@@ -338,7 +338,7 @@ typedef struct
   i16 crypto_start_offset; /* first buffer offset */
   i16 integ_start_offset;
   /* adj total_length for integ, e.g.4 bytes for IPSec ESN */
-  u16 integ_length_adj;
+  i16 integ_length_adj;
   vnet_crypto_op_status_t status : 8;
   u8 flags; /**< share same VNET_CRYPTO_OP_FLAG_* values */
 } vnet_crypto_async_frame_elt_t;
@@ -622,7 +622,7 @@ static_always_inline void
 vnet_crypto_async_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
                                 u32 key_index, u32 crypto_len,
                                 i16 integ_len_adj, i16 crypto_start_offset,
-                                u16 integ_start_offset, u32 buffer_index,
+                                i16 integ_start_offset, u32 buffer_index,
                                 u16 next_node, u8 *iv, u8 *tag, u8 *aad,
                                 u8 flags)
 {

--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c

@@ -237,6 +237,24 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
   return len;
 }
 
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: IVs must be unpredictable for AES-CBC whereas they can
+ * be predictable but should never be reused with the same key material
+ * for CTR and GCM.
+ * We use a packet counter as the IV for CTR and GCM, and to ensure the
+ * IV is unpredictable for CBC, it is then encrypted using the same key
+ * as the message. You can refer to NIST SP800-38a and NIST SP800-38d
+ * for more details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+{
+  ASSERT (iv_sz >= sizeof (u64));
+  u64 *iv = (u64 *) (payload - iv_sz);
+  clib_memset_u8 (iv, 0, iv_sz);
+  *iv = sa->iv_counter++;
+  return iv;
+}
+
 static_always_inline void
 esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vnet_crypto_op_t * ops, vlib_buffer_t * b[],
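The equivalence the comment relies on can be checked outside VPP: with a zero
IV, the first CBC output block is AES(key, counter), and that block then
chains into the payload exactly as a normal IV would. A minimal sketch, again
not VPP code, assuming OpenSSL's EVP API and made-up key and payload values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

static void
cbc_encrypt (const uint8_t key[16], const uint8_t iv[16], const uint8_t *in,
             int len, uint8_t *out)
{
  int n;
  EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new ();
  EVP_EncryptInit_ex (ctx, EVP_aes_128_cbc (), NULL, key, iv);
  EVP_CIPHER_CTX_set_padding (ctx, 0);
  EVP_EncryptUpdate (ctx, out, &n, in, len);
  EVP_CIPHER_CTX_free (ctx);
}

int
main (void)
{
  uint8_t key[16] = "demo-key-123456";
  uint8_t zero_iv[16] = { 0 };
  uint64_t counter = 42; /* predictable per-packet counter */

  /* crypto region starts at the IV field: [counter block][payload] */
  uint8_t buf[32] = { 0 };
  memcpy (buf, &counter, sizeof counter);
  memcpy (buf + 16, "0123456789abcdef", 16);

  uint8_t wire[32], check[16];
  cbc_encrypt (key, zero_iv, buf, 32, wire);

  /* wire[0..15] = AES (key, counter): unpredictable without the key.
   * Using it as a plain IV for the payload gives identical ciphertext: */
  cbc_encrypt (key, wire, buf + 16, 16, check);
  printf ("equivalent: %s\n", memcmp (check, wire + 16, 16) ? "no" : "yes");
  return 0;
}

This is why the CBC branch below can simply point op->iv at a zeroed scratch
area and extend the crypto region backwards over the counter field: the
encrypted counter that lands on the wire is the unpredictable IV.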
@@ -390,27 +408,29 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
       vnet_crypto_op_t *op;
       vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
       vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+      u8 *crypto_start = payload;
+      /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
+       * have enough space for ESP header and footer which includes ICV */
+      ASSERT (payload_len > icv_sz);
+      u16 crypto_len = payload_len - icv_sz;
+
+      /* generate the IV in front of the payload */
+      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
 
-      op->src = op->dst = payload;
       op->key_index = sa0->crypto_key_index;
-      op->len = payload_len - icv_sz;
       op->user_data = bi;
 
       if (ipsec_sa_is_set_IS_CTR (sa0))
         {
-          ASSERT (sizeof (u64) == iv_sz);
           /* construct nonce in a scratch space in front of the IP header */
           esp_ctr_nonce_t *nonce =
-            (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
-                                 sizeof (*nonce));
-          u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
+            (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
 
           if (ipsec_sa_is_set_IS_AEAD (sa0))
             {
               /* construct aad in a scratch space in front of the nonce */
               op->aad = (u8 *) nonce - sizeof (esp_aead_t);
               op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
-              op->tag = payload + op->len;
+              op->tag = payload + crypto_len;
               op->tag_len = 16;
             }
           else
@@ -419,13 +439,17 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
             }
 
           nonce->salt = sa0->salt;
-          nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+          nonce->iv = *(u64 *) pkt_iv;
           op->iv = (u8 *) nonce;
         }
       else
         {
-          op->iv = payload - iv_sz;
-          op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+          /* construct zero iv in front of the IP header */
+          op->iv = pkt_iv - hdr_len - iv_sz;
+          clib_memset_u8 (op->iv, 0, iv_sz);
+          /* include iv field in crypto */
+          crypto_start -= iv_sz;
+          crypto_len += iv_sz;
         }
 
       if (lb != b[0])
@@ -434,8 +458,15 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
           op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
           op->chunk_index = vec_len (ptd->chunks);
           op->tag = vlib_buffer_get_tail (lb) - icv_sz;
-          esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
-                                    payload_len, &op->n_chunks);
+          esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+                                    crypto_start, crypto_len + icv_sz,
+                                    &op->n_chunks);
+        }
+      else
+        {
+          /* not chained */
+          op->src = op->dst = crypto_start;
+          op->len = crypto_len;
         }
     }
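For orientation, here is the memory layout the CBC branch above sets up, as a
standalone sketch with hypothetical sizes and plain C types (VPP's real
buffer structures differ):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* [zero IV][hdrs, hdr_len][counter IV, iv_sz][payload, payload_len]
 *  ^ op->iv                ^ crypto_start; crypto_len spans IV + payload */
int
main (void)
{
  enum { SCRATCH = 64, HDR_LEN = 28, IV_SZ = 16, PAYLOAD_LEN = 64 };
  uint8_t buf[SCRATCH + HDR_LEN + IV_SZ + PAYLOAD_LEN];
  uint8_t *payload = buf + SCRATCH + HDR_LEN + IV_SZ;

  /* as esp_generate_iv: zeroed counter IV written in front of the payload */
  uint64_t iv_counter = 0;
  uint8_t *pkt_iv = payload - IV_SZ;
  memset (pkt_iv, 0, IV_SZ);
  memcpy (pkt_iv, &iv_counter, sizeof iv_counter);

  /* CBC branch: zero IV placed in scratch space in front of the headers */
  uint8_t *op_iv = pkt_iv - HDR_LEN - IV_SZ;
  memset (op_iv, 0, IV_SZ);

  /* crypto region extended backwards so the counter IV gets encrypted */
  uint8_t *crypto_start = payload - IV_SZ;
  int crypto_len = PAYLOAD_LEN + IV_SZ;

  assert (crypto_start == pkt_iv);
  assert (op_iv >= buf && crypto_start + crypto_len <= buf + sizeof (buf));
  return 0;
}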
@@ -485,26 +516,26 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
   u8 *tag, *iv, *aad = 0;
   u8 flag = 0;
   u32 key_index;
-  i16 crypto_start_offset, integ_start_offset = 0;
+  i16 crypto_start_offset, integ_start_offset;
   u16 crypto_total_len, integ_total_len;
 
   post->next_index = next;
 
   /* crypto */
-  crypto_start_offset = payload - b->data;
+  crypto_start_offset = integ_start_offset = payload - b->data;
   crypto_total_len = integ_total_len = payload_len - icv_sz;
   tag = payload + crypto_total_len;
 
   key_index = sa->linked_key_index;
 
+  /* generate the IV in front of the payload */
+  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+
   if (ipsec_sa_is_set_IS_CTR (sa))
     {
-      ASSERT (sizeof (u64) == iv_sz);
       /* construct nonce in a scratch space in front of the IP header */
-      esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
-                                                    hdr_len - sizeof (*nonce));
-      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
+      esp_ctr_nonce_t *nonce =
+        (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
 
       if (ipsec_sa_is_set_IS_AEAD (sa))
         {
           /* construct aad in a scratch space in front of the nonce */
@@ -518,13 +549,17 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
         }
 
       nonce->salt = sa->salt;
-      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+      nonce->iv = *(u64 *) pkt_iv;
       iv = (u8 *) nonce;
     }
   else
     {
-      iv = payload - iv_sz;
-      flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+      /* construct zero iv in front of the IP header */
+      iv = pkt_iv - hdr_len - iv_sz;
+      clib_memset_u8 (iv, 0, iv_sz);
+      /* include iv field in crypto */
+      crypto_start_offset -= iv_sz;
+      crypto_total_len += iv_sz;
     }
 
   if (lb != b)
@@ -532,13 +567,14 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
       /* chain */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
       tag = vlib_buffer_get_tail (lb) - icv_sz;
-      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
-                                                   payload, payload_len, 0);
+      crypto_total_len = esp_encrypt_chain_crypto (
+        vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+        crypto_total_len + icv_sz, 0);
     }
 
   if (sa->integ_op_id)
     {
-      integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+      integ_start_offset -= iv_sz + sizeof (esp_header_t);
       integ_total_len += iv_sz + sizeof (esp_header_t);
 
       if (b != lb)
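The integrity adjustment at the end mirrors the crypto one: the ICV must also
cover the ESP header and the IV field sitting in front of the payload. A
small self-contained check with hypothetical offsets (not VPP's actual
values):

#include <assert.h>

int
main (void)
{
  enum { ESP_HDR_SZ = 8, IV_SZ = 16, PAYLOAD_OFF = 128,
         PAYLOAD_LEN = 64, ICV_SZ = 16 };

  int crypto_start_offset = PAYLOAD_OFF;
  int integ_start_offset = PAYLOAD_OFF;
  int crypto_total_len = PAYLOAD_LEN - ICV_SZ;
  int integ_total_len = PAYLOAD_LEN - ICV_SZ;

  /* CBC: crypto region extended backwards over the counter IV field */
  crypto_start_offset -= IV_SZ;
  crypto_total_len += IV_SZ;

  /* integrity: additionally covers the ESP header in front of the IV */
  integ_start_offset -= IV_SZ + ESP_HDR_SZ;
  integ_total_len += IV_SZ + ESP_HDR_SZ;

  assert (integ_start_offset == crypto_start_offset - ESP_HDR_SZ);
  assert (integ_total_len == crypto_total_len + ESP_HDR_SZ);
  return 0;
}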

--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h

@@ -133,7 +133,7 @@ typedef struct
   u32 seq;
   u32 seq_hi;
   u64 replay_window;
-  u64 ctr_iv_counter;
+  u64 iv_counter;
   dpo_id_t dpo;
   vnet_crypto_key_index_t crypto_key_index;