Compare commits

...

3 Commits

Author SHA1 Message Date
594a4a5129 ipsec: fix AES CBC IV generation (CVE-2022-46397)
For AES-CBC, the IV must be unpredictable (see NIST SP800-38a Appendix
C). Chaining IVs, as the ipsecmb and native backends do for
VNET_CRYPTO_OP_FLAG_INIT_IV, is fully predictable.
Encrypt a counter as part of the message, making the (predictable)
counter-generated IV unpredictable.

Fixes: VPP-2037
Type: fix

Change-Id: If4f192d62bf97dda553e7573331c75efa11822ae
Signed-off-by: Benoît Ganne <bganne@cisco.com>
2023-02-06 16:08:09 +01:00
babecb4132 tcp: fix bt acked_sacked on recovery
Type: fix

Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: I2e2d76661fbb07dd8c6afa3583bb18e01b7a7fb6
(cherry picked from commit 3e2ec42a07)
2020-10-14 14:15:59 +00:00
2a33546579 virtio: fix the tcp/udp checksum offloads
Some vhost backends calculate the wrong checksum for
tcp/udp offload when the driver resets the tcp/udp
checksum field to '0'.

Type: fix

Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: I3c45df487f00d7e3d949b4efb32d7f7e01d1108b
2020-10-12 12:27:25 -07:00
4 changed files with 32 additions and 12 deletions

View File

@@ -296,15 +296,11 @@ vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
 	  tcp_header_t *tcp = (tcp_header_t *)
 	    (b0_data + vnet_buffer (b0)->l4_hdr_offset);
 	  l4_hdr_sz = tcp_header_bytes (tcp);
-	  tcp->checksum = 0;
 	  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
 	}
       else if (l4_proto == IP_PROTOCOL_UDP)
 	{
-	  udp_header_t *udp =
-	    (udp_header_t *) (b0_data + vnet_buffer (b0)->l4_hdr_offset);
-	  l4_hdr_sz = sizeof (*udp);
-	  udp->checksum = 0;
+	  l4_hdr_sz = sizeof (udp_header_t);
 	  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
 	}
     }
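
The removed lines above zeroed the L4 checksum field before setting the offload flag. A plausible reading of why that breaks some backends (an assumption, not stated in the commit): offload engines that complete a partial checksum expect the tcp/udp checksum field to be pre-seeded with the pseudo-header checksum and only fold in the L4 header and payload themselves, so overwriting the seed with 0 yields a wrong final sum. A minimal standalone sketch of that pseudo-header seed follows; it is not VPP code and the function names are illustrative:

#include <stdint.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits (RFC 1071). */
static uint16_t
csum_fold (uint32_t sum)
{
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) sum;
}

/* IPv4/TCP pseudo-header checksum: the (uncomplemented) seed a driver
 * leaves in tcp->checksum so checksum-completing hardware can finish
 * the job.  Zeroing the field instead discards this seed. */
static uint16_t
tcp_pseudo_csum (uint32_t src_ip, uint32_t dst_ip, uint16_t tcp_len)
{
  uint32_t sum = 0;
  sum += src_ip >> 16;
  sum += src_ip & 0xffff;
  sum += dst_ip >> 16;
  sum += dst_ip & 0xffff;
  sum += 6;			/* IANA protocol number for TCP */
  sum += tcp_len;		/* TCP header + payload length */
  return csum_fold (sum);
}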

View File

@@ -201,6 +201,24 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6)
   return len;
 }
 
+/* IPsec IV generation: IVs requirements differ depending of the
+ * encryption mode: IVs must be unpredictable for AES-CBC whereas it can
+ * be predictable but should never be reused with the same key material
+ * for CTR and GCM.
+ * We use a packet counter as the IV for CTR and GCM, and to ensure the
+ * IV is unpredictable for CBC, it is then encrypted using the same key
+ * as the message. You can refer to NIST SP800-38a and NIST SP800-38d
+ * for more details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t * sa, void *payload, int iv_sz)
+{
+  ASSERT (iv_sz >= sizeof (u64));
+  u64 *iv = (u64 *) (payload - iv_sz);
+  clib_memset_u8 (iv, 0, iv_sz);
+  *iv = sa->iv_counter++;
+  return iv;
+}
+
 static_always_inline void
 esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
 		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
@@ -463,6 +481,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 	      vnet_crypto_op_t *op;
 	      vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
 	      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+	      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
 	      op->src = op->dst = payload;
 	      op->key_index = sa0->crypto_key_index;
 	      op->len = payload_len - icv_sz;
@@ -481,16 +500,19 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 		  op->tag = payload + op->len;
 		  op->tag_len = 16;
 
-		  u64 *iv = (u64 *) (payload - iv_sz);
 		  nonce->salt = sa0->salt;
-		  nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
+		  nonce->iv = *(u64 *) pkt_iv;
 		  op->iv = (u8 *) nonce;
 		  nonce++;
 		}
 	      else
 		{
-		  op->iv = payload - iv_sz;
-		  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+		  /* construct zero iv in front of the IP header */
+		  op->iv = pkt_iv - hdr_len - iv_sz;
+		  clib_memset_u8 (op->iv, 0, iv_sz);
+		  /* include iv field in crypto */
+		  op->src -= iv_sz;
+		  op->len += iv_sz;
 		}
 	    }
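
To see the CBC scheme end to end: esp_generate_iv() writes a per-SA counter into a zero-padded block in front of the payload, op->iv points at a zero IV placed before that block, and op->src/op->len are extended to pull the counter block into the encrypted range. The first ciphertext block is therefore an encrypted counter, which acts as the unpredictable IV for the rest of the message. Below is a minimal standalone sketch of the same idea using the OpenSSL EVP API; it is not VPP code, and the function and buffer names are illustrative:

#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>

/* Encrypt msg with AES-128-CBC under a zero IV, prepending one block
 * that holds a packet counter.  out[0..15] is then an encrypted
 * counter, i.e. an unpredictable IV for the remaining blocks,
 * mirroring the esp_generate_iv() + zero-IV construction above. */
static int
cbc_encrypt_with_counter_iv (const uint8_t key[16], uint64_t counter,
			     const uint8_t *msg, int msg_len,
			     uint8_t *out, int *out_len)
{
  uint8_t zero_iv[16] = { 0 };
  uint8_t counter_block[16] = { 0 };
  int len, total = 0, ok = 0;
  EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new ();

  if (!ctx)
    return 0;
  memcpy (counter_block, &counter, sizeof (counter));
  if (EVP_EncryptInit_ex (ctx, EVP_aes_128_cbc (), NULL, key, zero_iv) != 1)
    goto done;
  /* counter block first: its ciphertext doubles as the wire IV */
  if (EVP_EncryptUpdate (ctx, out, &len, counter_block, 16) != 1)
    goto done;
  total += len;
  if (EVP_EncryptUpdate (ctx, out + total, &len, msg, msg_len) != 1)
    goto done;
  total += len;
  if (EVP_EncryptFinal_ex (ctx, out + total, &len) != 1)
    goto done;
  total += len;
  *out_len = total;
  ok = 1;

done:
  EVP_CIPHER_CTX_free (ctx);
  return ok;
}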

View File

@@ -122,6 +122,8 @@ typedef struct
   u64 replay_window;
   dpo_id_t dpo;
+
+  u64 iv_counter;
 
   vnet_crypto_key_index_t crypto_key_index;
   vnet_crypto_key_index_t integ_key_index;
   vnet_crypto_op_id_t crypto_enc_op_id:16;
@@ -161,7 +163,6 @@ typedef struct
   /* Salt used in GCM modes - stored in network byte order */
   u32 salt;
-  u64 gcm_iv_counter;
 } ipsec_sa_t;
 
 STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);

View File

@@ -591,11 +591,12 @@ tcp_bt_sample_delivery_rate (tcp_connection_t * tc, tcp_rate_sample_t * rs)
     return;
 
   delivered = tc->bytes_acked + tc->sack_sb.last_sacked_bytes;
+  /* Do not count bytes that were previously sacked again */
+  delivered -= tc->sack_sb.last_bytes_delivered;
   if (!delivered || tc->bt->head == TCP_BTS_INVALID_INDEX)
     return;
 
-  /* Do not count bytes that were previously sacked again */
-  tc->delivered += delivered - tc->sack_sb.last_bytes_delivered;
+  tc->delivered += delivered;
   tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
 
   if (tc->app_limited && tc->delivered > tc->app_limited)
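
The reordering matters when everything newly acked was already counted as delivered via SACK. A hedged numeric sketch with hypothetical values, not VPP code: with 1000 bytes cumulatively acked that were all previously SACKed, the old flow saw a nonzero delivered count, passed the early-return check, and still produced a delivery-rate sample even though nothing new was delivered; the new flow subtracts first, computes zero, and returns before sampling:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  /* hypothetical recovery episode: 1000 bytes cumulatively acked,
   * all of them previously SACKed and already counted as delivered */
  uint64_t bytes_acked = 1000, last_sacked_bytes = 0;
  uint64_t last_bytes_delivered = 1000;

  /* old flow: check before subtracting, so a zero-byte sample leaks */
  uint64_t old_delivered = bytes_acked + last_sacked_bytes;	/* 1000 */
  int old_takes_sample = old_delivered != 0;			/* 1 */

  /* new flow: subtract first, so the early return short-circuits */
  uint64_t new_delivered =
    bytes_acked + last_sacked_bytes - last_bytes_delivered;	/* 0 */
  int new_takes_sample = new_delivered != 0;			/* 0 */

  printf ("old: sample=%d, newly delivered=%llu\n", old_takes_sample,
	  (unsigned long long) (old_delivered - last_bytes_delivered));
  printf ("new: sample=%d, newly delivered=%llu\n", new_takes_sample,
	  (unsigned long long) new_delivered);
  return 0;
}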