vxlan: add tunnel cache to graph node

Type: improvement

Signed-off-by: Drenfong Wong <drenfong.wang@intel.com>
Change-Id: Ia81aaa86fe071cbbed028cc85c5f3fa0f1940a0f
Author:    Junfeng Wang
Date:      2021-03-09 16:44:57 +08:00
Committer: John Lo
Parent:    162b70d50a
Commit:    290526e3c7
9 changed files with 49 additions and 47 deletions
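The change is the same across all four tunnel types: the 8-entry vtep4_cache_t used by the AVX-512 bypass path moves from a stack local in each ip_*_bypass_inline node (re-zeroed on every frame) into the per-protocol main struct, so cache contents persist across frames. A minimal sketch of that lifetime difference — cache_t, main_t, and the process_frame_* functions below are hypothetical stand-ins, not VPP code:

/* Sketch only: hypothetical stand-ins, not VPP code. */
#include <string.h>

typedef struct { unsigned long long keys[8]; int idx; } cache_t;
typedef struct { cache_t vtep4_cache; } main_t;

static main_t g_main;           /* lives as long as the process */

/* Old pattern: a stack local zeroed per frame, so the warmed-up
 * cache is thrown away at the end of every frame. */
static void process_frame_old (void)
{
  cache_t cache;
  memset (&cache, 0, sizeof (cache));
  /* ... per-packet lookups warm `cache`, then it is discarded ... */
}

/* New pattern: the cache lives in the main struct, so a tunnel seen
 * in one frame still hits in the next. */
static void process_frame_new (void)
{
  cache_t *cache = &g_main.vtep4_cache;
  /* ... per-packet lookups reuse whatever earlier frames cached ... */
}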
+6 -10
@@ -870,10 +870,6 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
   vtep6_key_t last_vtep6;      /* last IPv6 address / fib index
                                   matching a local VTEP address */
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
-  vtep4_cache_t vtep4_u512;
-  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -968,8 +964,8 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+                                       &vxm->vtep4_u512))
 #else
               if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
 #endif
@@ -1051,8 +1047,8 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&vxm->vtep_table, b1, ip41, &last_vtep4,
+                                       &vxm->vtep4_u512))
 #else
               if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
 #endif
@@ -1171,8 +1167,8 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+                                       &vxm->vtep4_u512))
 #else
               if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
 #endif
+5
@@ -186,6 +186,11 @@ typedef struct
   vnet_main_t *vnet_main;
 
   u16 msg_id_base;
+
+  /* cache for last 8 geneve tunnel */
+#ifdef CLIB_HAVE_VEC512
+  vtep4_cache_t vtep4_u512;
+#endif
 } geneve_main_t;
 
 extern geneve_main_t geneve_main;
+5
@@ -236,6 +236,11 @@ typedef struct
   vlib_main_t *vlib_main;
   vnet_main_t *vnet_main;
   u32 flow_id_start;
+
+  /* cache for last 8 gtpu tunnel */
+#ifdef CLIB_HAVE_VEC512
+  vtep4_cache_t vtep4_u512;
+#endif
 } gtpu_main_t;
 
 extern gtpu_main_t gtpu_main;
+6 -10
@@ -804,10 +804,6 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
   vtep6_key_t last_vtep6;      /* last IPv6 address / fib index
                                   matching a local VTEP address */
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
-  vtep4_cache_t vtep4_u512;
-  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -901,8 +897,8 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
+                                       &gtm->vtep4_u512))
 #else
               if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
 #endif
@@ -980,8 +976,8 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&gtm->vtep_table, b1, ip41, &last_vtep4,
+                                       &gtm->vtep4_u512))
 #else
               if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
 #endif
@@ -1096,8 +1092,8 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
+                                       &gtm->vtep4_u512))
 #else
               if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
 #endif
+4 -6
@@ -111,6 +111,7 @@ vtep4_check (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
   return VTEP_CHECK_PASS;
 }
 
+#ifdef CLIB_HAVE_VEC512
 typedef struct
 {
   vtep4_key_t vtep4_cache[8];
@@ -128,7 +129,6 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
   if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
     return VTEP_CHECK_PASS_UNCHANGED;
 
-#ifdef CLIB_HAVE_VEC512
   u64x8 k4_u64x8 = u64x8_splat (k4.as_u64);
   u64x8 cache = u64x8_load_unaligned (vtep4_u512->vtep4_cache);
   u8 result = u64x8_mask_is_equal (cache, k4_u64x8);
@@ -138,20 +138,18 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
         vtep4_u512->vtep4_cache[count_trailing_zeros (result)].as_u64;
       return VTEP_CHECK_PASS_UNCHANGED;
     }
-#endif
 
   if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
     return VTEP_CHECK_FAIL;
 
-  last_k4->as_u64 = k4.as_u64;
-
-#ifdef CLIB_HAVE_VEC512
   vtep4_u512->vtep4_cache[vtep4_u512->idx].as_u64 = k4.as_u64;
   vtep4_u512->idx = (vtep4_u512->idx + 1) & 0x7;
-#endif
 
+  last_k4->as_u64 = k4.as_u64;
+
   return VTEP_CHECK_PASS;
 }
+#endif
 
 always_inline u8
 vtep6_check (vtep_table_t * t, vlib_buffer_t * b0, ip6_header_t * ip60,
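On AVX-512 builds, vtep4_check_vector above broadcasts the 64-bit VTEP key with u64x8_splat, compares it against all 8 cached keys at once (u64x8_mask_is_equal), and picks a hit with count_trailing_zeros on the compare mask; a miss that passes the hash lookup is installed round-robin via the idx counter masked with & 0x7. A scalar sketch of the same scheme, using plain-C stand-ins for the VPP types:

#include <stdint.h>

/* Scalar sketch of the 8-entry key cache in vtep4_check_vector; the
 * real code performs all 8 comparisons with a single AVX-512 compare
 * and inspects the resulting bit mask. Names here are stand-ins. */
typedef struct
{
  uint64_t cache[8];
  int idx;
} key_cache_t;

/* Return 1 on a cache hit. */
static int
cache_lookup (const key_cache_t * c, uint64_t key)
{
  for (int i = 0; i < 8; i++)   /* one u64x8 compare + mask in VPP */
    if (c->cache[i] == key)
      return 1;
  return 0;
}

/* On a miss that passes the VTEP hash lookup, install the key
 * round-robin; `& 0x7` wraps the index after 8 entries. */
static void
cache_insert (key_cache_t * c, uint64_t key)
{
  c->cache[c->idx] = key;
  c->idx = (c->idx + 1) & 0x7;
}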
+6 -10
@@ -793,10 +793,6 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
   vtep6_key_t last_vtep6;      /* last IPv6 address / fib index
                                   matching a local VTEP address */
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
-  vtep4_cache_t vtep4_u512;
-  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -889,8 +885,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
+                                       &ngm->vtep4_u512))
 #else
               if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
 #endif
@@ -972,8 +968,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&ngm->vtep_table, b1, ip41, &last_vtep4,
+                                       &ngm->vtep4_u512))
 #else
               if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
 #endif
@@ -1091,8 +1087,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
+                                       &ngm->vtep4_u512))
 #else
               if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
 #endif
+5
@@ -220,6 +220,11 @@ typedef struct
   /** State convenience vnet_main_t */
   vnet_main_t *vnet_main;
 
+  /* cache for last 8 vxlan_gpe tunnel */
+#ifdef CLIB_HAVE_VEC512
+  vtep4_cache_t vtep4_u512;
+#endif
+
   /** List of next nodes for the decap indexed on protocol */
   uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
 } vxlan_gpe_main_t;
+6 -11
@@ -469,11 +469,6 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
   last_tunnel_cache4 last4;
   last_tunnel_cache6 last6;
 
-#ifdef CLIB_HAVE_VEC512
-  vtep4_cache_t vtep4_u512;
-  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
-
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
@@ -584,8 +579,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+                                       &vxm->vtep4_u512))
 #else
               if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
 #endif
@@ -672,8 +667,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&vxm->vtep_table, b1, ip41, &last_vtep4,
+                                       &vxm->vtep4_u512))
 #else
               if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
 #endif
@@ -799,8 +794,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
           if (is_ip4)
             {
 #ifdef CLIB_HAVE_VEC512
-              if (!vtep4_check_vector
-                  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+              if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+                                       &vxm->vtep4_u512))
 #else
               if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
 #endif
+6
@@ -189,6 +189,12 @@ typedef struct
   /* Record used instances */
   uword *instance_used;
   u32 flow_id_start;
+
+  /* cache for last 8 vxlan tunnel */
+#ifdef CLIB_HAVE_VEC512
+  vtep4_cache_t vtep4_u512;
+#endif
+
 } vxlan_main_t;
 
 extern vxlan_main_t vxlan_main;