ipsec: introduce fast path ipv6 inbound matching

This patch introduces fast path matching for IPv6 inbound traffic.
The fast path uses bihash tables to find the matching policy.
Adding and removing policies in the fast path is much faster than in
the current implementation. This is still a new feature; further work
can and needs to be done to improve its performance.

Type: feature

Change-Id: Iaef6638033666ad6eb028ffe0c8a4f4374451753
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Author:     Piotr Bronowski <piotrx.bronowski@intel.com>
Date:       2022-09-20 14:44:36 +00:00
Committer:  Fan Zhang
Commit:     06abf23526
Parent:     a2a7a4031b

5 changed files with 274 additions and 95 deletions
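
For readers unfamiliar with the fast path: policies are stored in bihash tables keyed by a
masked 5-tuple. Each policy contributes a mask; on lookup the packet's 5-tuple is ANDed with
every registered mask, the masked key is searched in the hash table, and the highest-priority
policy among the hits is returned. Below is a minimal, self-contained sketch of that matching
loop in plain C; the types and the linear table_lookup() helper are hypothetical stand-ins for
the clib_bihash_40_8 machinery used in the diff, not the actual VPP API.

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t k[5]; } key40_t;                  /* (masked) 5-tuple key */
typedef struct { key40_t key; uint32_t priority; } entry_t; /* one policy entry */

/* Hypothetical stand-in for the bihash search: linear scan over entries
 * that were inserted with their key already masked. */
static const entry_t *
table_lookup (const entry_t *tbl, size_t n, const key40_t *key)
{
  for (size_t i = 0; i < n; i++)
    if (memcmp (&tbl[i].key, key, sizeof (*key)) == 0)
      return &tbl[i];
  return 0;
}

/* Core of the fast-path match: AND the packet tuple with every registered
 * mask, look up the masked key, keep the highest-priority hit. */
static const entry_t *
fast_path_match (const key40_t *pkt, const key40_t *masks, size_t n_masks,
                 const entry_t *tbl, size_t n_entries)
{
  const entry_t *best = 0;

  for (size_t m = 0; m < n_masks; m++)
    {
      key40_t masked;
      for (int i = 0; i < 5; i++)
        masked.k[i] = pkt->k[i] & masks[m].k[i];

      const entry_t *hit = table_lookup (tbl, n_entries, &masked);
      if (hit && (!best || hit->priority > best->priority))
        best = hit;
    }
  return best;
}

Representing address ranges and wildcards as masks is what lets an exact-match hash lookup
replace the linear policy walk of the traditional SPD.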

===== changed file 1 of 5 =====

@@ -52,6 +52,7 @@ typedef struct
   ip_protocol_t proto;
   u32 spd;
   u32 policy_index;
+  u32 policy_type;
   u32 sa_id;
   u32 spi;
   u32 seq;
@@ -65,9 +66,10 @@ format_ipsec_input_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   ipsec_input_trace_t *t = va_arg (*args, ipsec_input_trace_t *);
 
-  s = format (s, "%U: sa_id %u spd %u policy %d spi %u (0x%08x) seq %u",
-              format_ip_protocol, t->proto, t->sa_id,
-              t->spd, t->policy_index, t->spi, t->spi, t->seq);
+  s =
+    format (s, "%U: sa_id %u type: %u spd %u policy %d spi %u (0x%08x) seq %u",
+            format_ip_protocol, t->proto, t->sa_id, t->policy_type, t->spd,
+            t->policy_index, t->spi, t->spi, t->seq);
 
   return s;
 }
@@ -162,6 +164,19 @@ ipsec_fp_in_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
   tuple->is_ipv6 = 0;
 }
 
+always_inline void
+ipsec_fp_in_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *la,
+                                   ip6_address_t *ra, u32 spi, u8 action)
+{
+  clib_memcpy (&tuple->ip6_laddr, la, sizeof (ip6_address_t));
+  clib_memcpy (&tuple->ip6_raddr, ra, sizeof (ip6_address_t));
+
+  tuple->spi = spi;
+  tuple->action = action;
+  tuple->is_ipv6 = 1;
+}
+
 always_inline ipsec_policy_t *
 ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
                           ipsec_spd_policy_type_t policy_type)
@@ -732,6 +747,9 @@ VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
   ipsec_main_t *im = &ipsec_main;
   u32 ipsec_unprocessed = 0;
   u32 ipsec_matched = 0;
+  ipsec_policy_t *policies[1];
+  ipsec_fp_5tuple_t tuples[1];
+  bool ip_v6 = true;
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -747,7 +765,7 @@ VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
       while (n_left_from > 0 && n_left_to_next > 0)
        {
-         u32 bi0, next0, pi0;
+         u32 bi0, next0, pi0 = ~0;
          vlib_buffer_t *b0;
          ip6_header_t *ip0;
          esp_header_t *esp0;
@@ -784,11 +802,22 @@ VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
             clib_net_to_host_u16 (ip0->payload_length) + header_size,
             spd0->id);
 #endif
-         p0 = ipsec6_input_protect_policy_match (spd0,
-                                                 &ip0->src_address,
-                                                 &ip0->dst_address,
-                                                 clib_net_to_host_u32
-                                                 (esp0->spi));
+         if (im->fp_spd_ipv6_in_is_enabled &&
+             PREDICT_TRUE (INDEX_INVALID !=
+                           spd0->fp_spd.ip6_in_lookup_hash_idx))
+           {
+             ipsec_fp_in_5tuple_from_ip6_range (
+               &tuples[0], &ip0->src_address, &ip0->dst_address,
+               clib_net_to_host_u32 (esp0->spi),
+               IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT);
+             ipsec_fp_in_policy_match_n (&spd0->fp_spd, ip_v6, tuples,
+                                         policies, 1);
+             p0 = policies[0];
+           }
+         else
+           p0 = ipsec6_input_protect_policy_match (
+             spd0, &ip0->src_address, &ip0->dst_address,
+             clib_net_to_host_u32 (esp0->spi));
 
          if (PREDICT_TRUE (p0 != 0))
            {
@@ -804,6 +833,8 @@ VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
              vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
              next0 = im->esp6_decrypt_next_index;
              vlib_buffer_advance (b0, header_size);
+             /* TODO Add policy matching for bypass and discard policy
+              * type */
              goto trace0;
            }
          else
@@ -855,11 +886,16 @@ VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
              vlib_add_trace (vm, node, b0, sizeof (*tr));
              if (p0)
-               tr->sa_id = p0->sa_id;
+               {
+                 tr->sa_id = p0->sa_id;
+                 tr->policy_type = p0->type;
+               }
+
              tr->proto = ip0->protocol;
              tr->spi = clib_net_to_host_u32 (esp0->spi);
              tr->seq = clib_net_to_host_u32 (esp0->seq);
              tr->spd = spd0->id;
+             tr->policy_index = pi0;
            }
 
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,

===== changed file 2 of 5 =====

@@ -189,7 +189,7 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
          fp_spd->name6_in = format (0, "spd_%u_fp_ip6_in", spd_id);
          pool_get (im->fp_ip6_lookup_hashes_pool, bihash_table);
-         fp_spd->ip6_out_lookup_hash_idx =
+         fp_spd->ip6_in_lookup_hash_idx =
            bihash_table - im->fp_ip6_lookup_hashes_pool;
          clib_bihash_init_40_8 (bihash_table, (char *) fp_spd->name6_in,
                                 im->fp_lookup_hash_buckets,

===== changed file 3 of 5 =====

@@ -20,9 +20,8 @@
 #include <vnet/ipsec/ipsec.h>
 
 static_always_inline int
-single_rule_match_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *match)
+single_rule_out_match_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *match)
 {
   if (PREDICT_FALSE (policy->is_ipv6 != match->is_ipv6))
     return (0);
@@ -138,7 +137,98 @@ static_always_inline u32
 ipsec_fp_in_ip6_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
                                 ipsec_policy_t **policies, u32 n)
 {
-  return 0;
+  u32 last_priority[n];
+  u32 i = 0;
+  u32 counter = 0;
+  ipsec_fp_mask_type_entry_t *mte;
+  ipsec_fp_mask_id_t *mti;
+  ipsec_fp_5tuple_t *match = tuples;
+  ipsec_policy_t *policy;
+
+  u32 n_left = n;
+  clib_bihash_kv_40_8_t kv;
+  /* result of the lookup */
+  clib_bihash_kv_40_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+  u64 *pkey, *pmatch, *pmask;
+  ipsec_main_t *im = &ipsec_main;
+  ipsec_spd_fp_t *pspd_fp = (ipsec_spd_fp_t *) spd_fp;
+  ipsec_fp_mask_id_t *mask_type_ids = pspd_fp->fp_mask_ids[match->action];
+  clib_bihash_40_8_t *bihash_table = pool_elt_at_index (
+    im->fp_ip6_lookup_hashes_pool, pspd_fp->ip6_in_lookup_hash_idx);
+
+  /* clear the list of matched policies pointers */
+  clib_memset (policies, 0, n * sizeof (*policies));
+  clib_memset (last_priority, 0, n * sizeof (u32));
+  n_left = n;
+
+  while (n_left)
+    {
+      vec_foreach (mti, mask_type_ids)
+        {
+          mte = im->fp_mask_types + mti->mask_type_idx;
+          if (mte->mask.action == 0)
+            continue;
+
+          pmatch = (u64 *) match->kv_40_8.key;
+          pmask = (u64 *) mte->mask.kv_40_8.key;
+          pkey = (u64 *) kv.key;
+
+          *pkey++ = *pmatch++ & *pmask++;
+          *pkey++ = *pmatch++ & *pmask++;
+          *pkey++ = *pmatch++ & *pmask++;
+          *pkey++ = *pmatch++ & *pmask++;
+          *pkey = *pmatch & *pmask;
+
+          int res =
+            clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
+          /* lookup the hash by each packet in the burst for this mask. */
+
+          if (res == 0)
+            {
+              /* There is a hit in the hash table. */
+              /* Find the policy with highest priority. */
+              /* Store the lookup results in a dedicated array. */
+
+              if (vec_len (result_val->fp_policies_ids) > 1)
+                {
+                  u32 *policy_id;
+                  vec_foreach (policy_id, result_val->fp_policies_ids)
+                    {
+                      policy = im->policies + *policy_id;
+                      if ((last_priority[i] < policy->priority) &&
+                          (single_rule_in_match_5tuple (policy, match)))
+                        {
+                          last_priority[i] = policy->priority;
+                          if (policies[i] == 0)
+                            counter++;
+                          policies[i] = policy;
+                        }
+                    }
+                }
+              else
+                {
+                  u32 *policy_id;
+                  ASSERT (vec_len (result_val->fp_policies_ids) == 1);
+                  policy_id = result_val->fp_policies_ids;
+                  policy = im->policies + *policy_id;
+                  if ((last_priority[i] < policy->priority) &&
+                      (single_rule_in_match_5tuple (policy, match)))
+                    {
+                      last_priority[i] = policy->priority;
+                      if (policies[i] == 0)
+                        counter++;
+                      policies[i] = policy;
+                    }
+                }
+            }
+        }
+
+      i++;
+      n_left--;
+      match++;
+    }
+
+  return counter;
 }
 
 static_always_inline u32
@@ -253,7 +343,7 @@ ipsec_fp_in_policy_match_n (void *spd_fp, u8 is_ipv6,
 }
 
 static_always_inline u32
-ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
+ipsec_fp_out_ip6_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
                                  ipsec_policy_t **policies, u32 *ids, u32 n)
 {
@@ -288,6 +378,8 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
       vec_foreach (mti, mask_type_ids)
        {
          mte = im->fp_mask_types + mti->mask_type_idx;
+         if (mte->mask.action != 0)
+           continue;
 
          pmatch = (u64 *) match->kv_40_8.key;
          pmask = (u64 *) mte->mask.kv_40_8.key;
@@ -316,7 +408,7 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
                    {
                      policy = im->policies + *policy_id;
-                     if (single_rule_match_5tuple (policy, match))
+                     if (single_rule_out_match_5tuple (policy, match))
                        {
                          if (last_priority[i] < policy->priority)
                            {
@@ -335,7 +427,7 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
              ASSERT (vec_len (result_val->fp_policies_ids) == 1);
              policy_id = result_val->fp_policies_ids;
              policy = im->policies + *policy_id;
-             if (single_rule_match_5tuple (policy, match))
+             if (single_rule_out_match_5tuple (policy, match))
                {
                  if (last_priority[i] < policy->priority)
                    {
@@ -357,7 +449,7 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
 }
 
 static_always_inline u32
-ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
+ipsec_fp_out_ip4_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
                                  ipsec_policy_t **policies, u32 *ids, u32 n)
 {
@@ -420,7 +512,7 @@ ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
                      policy = im->policies + *policy_id;
                      if ((last_priority[i] < policy->priority) &&
-                         (single_rule_match_5tuple (policy, match)))
+                         (single_rule_out_match_5tuple (policy, match)))
                        {
                          last_priority[i] = policy->priority;
                          if (policies[i] == 0)
@@ -437,7 +529,7 @@ ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
              policy_id = result_val->fp_policies_ids;
              policy = im->policies + *policy_id;
              if ((last_priority[i] < policy->priority) &&
-                 (single_rule_match_5tuple (policy, match)))
+                 (single_rule_out_match_5tuple (policy, match)))
                {
                  last_priority[i] = policy->priority;
                  if (policies[i] == 0)
@@ -469,9 +561,9 @@ ipsec_fp_out_policy_match_n (void *spd_fp, u8 is_ipv6,
 {
   if (is_ipv6)
-    return ipsec_fp_ip6_out_policy_match_n (spd_fp, tuples, policies, ids, n);
+    return ipsec_fp_out_ip6_policy_match_n (spd_fp, tuples, policies, ids, n);
   else
-    return ipsec_fp_ip4_out_policy_match_n (spd_fp, tuples, policies, ids, n);
+    return ipsec_fp_out_ip4_policy_match_n (spd_fp, tuples, policies, ids, n);
 }
 
 #endif /* !IPSEC_SPD_FP_LOOKUP_H */

===== changed file 4 of 5 =====

@@ -85,12 +85,39 @@ ipsec_is_policy_inbound (ipsec_policy_t *policy)
 {
   if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
-      policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)
+      policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD ||
+      policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
+      policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
+      policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)
     return 1;
 
   return 0;
 }
 
+static_always_inline int
+ipsec_is_fp_enabled (ipsec_main_t *im, ipsec_spd_t *spd,
+                     ipsec_policy_t *policy)
+{
+  if ((im->fp_spd_ipv4_out_is_enabled &&
+       PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx) &&
+       policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
+      (im->fp_spd_ipv4_in_is_enabled &&
+       PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_in_lookup_hash_idx) &&
+       (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
+        policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
+        policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
+      (im->fp_spd_ipv6_in_is_enabled &&
+       PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_in_lookup_hash_idx) &&
+       (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
+        policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
+        policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)) ||
+      (im->fp_spd_ipv6_out_is_enabled &&
+       PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx) &&
+       policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
+    return 1;
+
+  return 0;
+}
+
 int
 ipsec_add_del_policy (vlib_main_t * vm,
                       ipsec_policy_t * policy, int is_add, u32 * stat_index)
@@ -178,20 +205,7 @@ ipsec_add_del_policy (vlib_main_t * vm,
       * Try adding the policy into fast path SPD first. Only adding to
       * traditional SPD when failed.
       **/
-      if ((im->fp_spd_ipv4_out_is_enabled &&
-          PREDICT_TRUE (INDEX_INVALID !=
-                        spd->fp_spd.ip4_out_lookup_hash_idx) &&
-          policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
-         (im->fp_spd_ipv4_in_is_enabled &&
-          PREDICT_TRUE (INDEX_INVALID !=
-                        spd->fp_spd.ip4_in_lookup_hash_idx) &&
-          (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
-           policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
-           policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
-         (im->fp_spd_ipv6_out_is_enabled &&
-          PREDICT_TRUE (INDEX_INVALID !=
-                        spd->fp_spd.ip6_out_lookup_hash_idx) &&
-          policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
+      if (ipsec_is_fp_enabled (im, spd, policy))
        return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
                                        stat_index);
@@ -216,20 +230,8 @@ ipsec_add_del_policy (vlib_main_t * vm,
       * traditional SPD when fp delete fails.
       **/
-      if ((im->fp_spd_ipv4_out_is_enabled &&
-          PREDICT_TRUE (INDEX_INVALID !=
-                        spd->fp_spd.ip4_out_lookup_hash_idx) &&
-          policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
-         (im->fp_spd_ipv4_in_is_enabled &&
-          PREDICT_TRUE (INDEX_INVALID !=
-                        spd->fp_spd.ip4_in_lookup_hash_idx) &&
-          (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
-           policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
-           policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
-         (im->fp_spd_ipv6_out_is_enabled &&
-          PREDICT_TRUE (INDEX_INVALID !=
-                        spd->fp_spd.ip6_out_lookup_hash_idx) &&
-          policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
+      if (ipsec_is_fp_enabled (im, spd, policy))
        {
          if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
            {
@@ -426,7 +428,8 @@ ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
 }
 
 static_always_inline void
-ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
+ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
+                              bool inbound)
 {
   u64 *pladdr_start = (u64 *) &policy->laddr.start;
   u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
@@ -470,7 +473,18 @@ ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
   else
     *prmask = 0;
 
-  ipsec_fp_get_policy_ports_mask (policy, mask);
+  if (inbound)
+    {
+      if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
+        mask->spi = 0;
+
+      mask->protocol = 0;
+    }
+  else
+    {
+      mask->action = 0;
+      ipsec_fp_get_policy_ports_mask (policy, mask);
+    }
 }
 
 static_always_inline void
@@ -642,7 +656,7 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
   int res;
   bool inbound = ipsec_is_policy_inbound (policy);
 
-  ipsec_fp_ip6_get_policy_mask (policy, &mask);
+  ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
   pool_get (im->policies, vp);
   policy_index = vp - im->policies;
   vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
@@ -748,7 +762,7 @@ ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
   ipsec_policy_t *vp;
   u32 ii, iii, imt;
 
-  ipsec_fp_ip6_get_policy_mask (policy, &mask);
+  ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
   ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
   fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
   res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);

===== changed file 5 of 5 =====

@@ -7,7 +7,6 @@ from framework import VppTestRunner
 from template_ipsec import IPSecIPv4Fwd
 from template_ipsec import IPSecIPv6Fwd
 from test_ipsec_esp import TemplateIpsecEsp
-import pdb
 
 
 def debug_signal_handler(signal, frame):
@@ -35,30 +34,6 @@ class SpdFastPathInbound(IPSecIPv4Fwd):
         cls.vpp_cmdline.extend(["ipsec", "{", "ipv4-inbound-spd-fast-path on", "}"])
         cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
 
-    @classmethod
-    def create_enc_stream(self, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=5678):
-        packets = []
-        params = self.params[socket.AF_INET]
-        for i in range(pkt_count):
-            # create packet info stored in the test case instance
-            info = self.create_packet_info(src_if, dst_if)
-            # convert the info into packet payload
-            payload = self.info_to_payload(info)
-            # create the packet itself
-            p = Ether(
-                src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
-            ) / params.scapy_tra_sa.encrypt(
-                IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
-                / UDP(sport=src_prt, dport=dst_prt)
-                / Raw(payload)
-            )
-            # store a copy of the packet in the packet info
-            info.data = p.copy()
-            # append the packet to the list
-            packets.append(p)
-        # return the created packet list
-        return packets
-
 
 class SpdFastPathInboundProtect(TemplateIpsecEsp):
     @classmethod
@@ -98,6 +73,29 @@ class SpdFastPathIPv6Inbound(IPSecIPv6Fwd):
         cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
 
 
+class SpdFastPathIPv6InboundProtect(TemplateIpsecEsp):
+    @classmethod
+    def setUpConstants(cls):
+        super(SpdFastPathIPv6InboundProtect, cls).setUpConstants()
+        cls.vpp_cmdline.extend(["ipsec", "{", "ipv6-inbound-spd-fast-path on", "}"])
+        cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
+
+    @classmethod
+    def setUpClass(cls):
+        super(SpdFastPathIPv6InboundProtect, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(SpdFastPathIPv6InboundProtect, cls).tearDownClass()
+
+    def setUp(self):
+        super(SpdFastPathIPv6InboundProtect, self).setUp()
+
+    def tearDown(self):
+        self.unconfig_network()
+        super(SpdFastPathIPv6InboundProtect, self).tearDown()
+
+
 class IPSec4SpdTestCaseBypass(SpdFastPathInbound):
     """ IPSec/IPv4 inbound: Policy mode test case with fast path \
         (add bypass)"""
@@ -206,17 +204,12 @@ class IPSec4SpdTestCaseDiscard(SpdFastPathInbound):
         # even though it's lower priority
         policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
             1,
-            self.pg1,
             self.pg0,
+            self.pg1,
             socket.IPPROTO_UDP,
             is_out=0,
             priority=10,
             policy_type="discard",
-            ip_range=True,
-            local_ip_start=self.pg0.remote_ip4,
-            local_ip_stop=self.pg0.remote_ip4,
-            remote_ip_start=self.pg1.remote_ip4,
-            remote_ip_stop=self.pg1.remote_ip4,
         )
 
         # create output rule so we can capture forwarded packets
@@ -264,16 +257,9 @@ class IPSec4SpdTestCaseProtect(SpdFastPathInboundProtect):
         super(IPSec4SpdTestCaseProtect, self).tearDown()
 
     def test_ipsec_spd_inbound_protect(self):
-        # In this test case, packets in IPv4 FWD path are configured
+        # In this test case, encrypted packets in IPv4
+        # PROTECT path are configured
         # to go through IPSec inbound SPD policy lookup.
-        #
-        # 2 inbound SPD rules (1 HIGH and 1 LOW) are added.
-        #   - High priority rule action is set to DISCARD.
-        #   - Low priority rule action is set to BYPASS.
-        #
-        # Since BYPASS rules take precedence over DISCARD
-        # (the order being PROTECT, BYPASS, DISCARD) we expect the
-        # BYPASS rule to match and traffic to be correctly forwarded.
         pkt_count = 5
         payload_size = 64
@@ -840,5 +826,56 @@ class IPSec4SpdTestCaseMultiple(SpdFastPathInbound):
         self.verify_policy_match(0, policy_22)
 
 
+class IPSec6SpdTestCaseProtect(SpdFastPathIPv6InboundProtect):
+    """ IPSec/IPv6 inbound: Policy mode test case with fast path \
+        (add protect)"""
+
+    @classmethod
+    def setUpClass(cls):
+        super(IPSec6SpdTestCaseProtect, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(IPSec6SpdTestCaseProtect, cls).tearDownClass()
+
+    def setUp(self):
+        super(IPSec6SpdTestCaseProtect, self).setUp()
+
+    def tearDown(self):
+        super(IPSec6SpdTestCaseProtect, self).tearDown()
+
+    def test_ipsec6_spd_inbound_protect(self):
+        pkt_count = 5
+        payload_size = 64
+        p = self.params[socket.AF_INET6]
+        send_pkts = self.gen_encrypt_pkts6(
+            p,
+            p.scapy_tra_sa,
+            self.tra_if,
+            src=self.tra_if.local_ip6,
+            dst=self.tra_if.remote_ip6,
+            count=pkt_count,
+            payload_size=payload_size,
+        )
+        recv_pkts = self.send_and_expect(self.tra_if, send_pkts, self.tra_if)
+        self.logger.info(self.vapi.ppcli("show error"))
+        self.logger.info(self.vapi.ppcli("show ipsec all"))
+        pkts = p.tra_sa_in.get_stats()["packets"]
+        self.assertEqual(
+            pkts,
+            pkt_count,
+            "incorrect SA in counts: expected %d != %d" % (pkt_count, pkts),
+        )
+        pkts = p.tra_sa_out.get_stats()["packets"]
+        self.assertEqual(
+            pkts,
+            pkt_count,
+            "incorrect SA out counts: expected %d != %d" % (pkt_count, pkts),
+        )
+        self.assertEqual(p.tra_sa_out.get_lost(), 0)
+        self.assertEqual(p.tra_sa_in.get_lost(), 0)
+
+
 if __name__ == "__main__":
     unittest.main(testRunner=VppTestRunner)