gtpu: RX offload support for IPv6 payload

This patch adds offloading capability for IPv4 GTPU tunnels whose
decap next-node is IPv6.

Type: feature

Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Change-Id: Ie39cd43058d36514656351dc6e906a19d5de89c0
This commit is contained in:
Chenmin Sun
2020-03-02 00:08:20 +08:00
committed by Damjan Marion
parent 350737cd45
commit ed63a0ff7b
3 changed files with 38 additions and 8 deletions

View File

@ -1097,7 +1097,7 @@ vnet_gtpu_add_del_rx_flow (u32 hw_if_index, u32 t_index, int is_add)
.redirect_node_index = gtpu4_flow_input_node.index,
.buffer_advance = sizeof (ethernet_header_t)
+ sizeof (ip4_header_t) + sizeof (udp_header_t),
.type = VNET_FLOW_TYPE_IP4_GTPU_IP4,
.type = VNET_FLOW_TYPE_IP4_GTPU,
.ip4_gtpu = {
.protocol = IP_PROTOCOL_UDP,
.src_addr.addr = t->dst.ip4,
@ -1178,10 +1178,11 @@ gtpu_offload_command_fn (vlib_main_t * vm,
if (!ip46_address_is_ip4 (&t->dst))
return clib_error_return (0, "currently only IPV4 tunnels are supported");
/* inner protocol should be IPv4 */
if (t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT)
/* inner protocol should be IPv4/IPv6 */
if ((t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
(t->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
return clib_error_return (0,
"currently only inner IPV4 protocol is supported");
"currently only inner IPv4/IPv6 protocol is supported");
vnet_hw_interface_t *hw_if = vnet_get_hw_interface (vnm, hw_if_index);
ip4_main_t *im = &ip4_main;

View File

@ -64,7 +64,8 @@ vl_api_gtpu_offload_rx_t_handler (vl_api_gtpu_offload_rx_t * mp)
goto err;
}
if (t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT)
if ((t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
(t->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
{
rv = VNET_API_ERROR_INVALID_ADDRESS_FAMILY;
goto err;

View File

@ -1212,6 +1212,7 @@ VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
#define foreach_gtpu_flow_error \
_(NONE, "no error") \
_(PAYLOAD_ERROR, "Payload type errors") \
_(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
_(IP_HEADER_ERROR, "Rx ip header errors") \
_(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
@ -1428,7 +1429,16 @@ gtpu_flow_input (vlib_main_t * vm,
/* Pop gtpu header */
vlib_buffer_advance (b0, gtpu_hdr_len0);
next0 = GTPU_INPUT_NEXT_IP4_INPUT;
/* assign the next node */
if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
(t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
{
error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
next0 = GTPU_INPUT_NEXT_DROP;
goto trace0;
}
next0 = t0->decap_next_index;
sw_if_index0 = t0->sw_if_index;
/* Set packet input sw_if_index to unicast GTPU tunnel for learning */
@ -1500,7 +1510,16 @@ trace0:
/* Pop gtpu header */
vlib_buffer_advance (b1, gtpu_hdr_len1);
next1 = GTPU_INPUT_NEXT_IP4_INPUT;
/* assign the next node */
if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
(t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
{
next1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
next1 = GTPU_INPUT_NEXT_DROP;
goto trace1;
}
next1 = t1->decap_next_index;
sw_if_index1 = t1->sw_if_index;
/* Required to make the l2 tag push / pop code work on l2 subifs */
@ -1620,7 +1639,16 @@ trace1:
/* Pop gtpu header */
vlib_buffer_advance (b0, gtpu_hdr_len0);
next0 = GTPU_INPUT_NEXT_IP4_INPUT;
/* assign the next node */
if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
(t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
{
next0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
next0 = GTPU_INPUT_NEXT_DROP;
goto trace00;
}
next0 = t0->decap_next_index;
sw_if_index0 = t0->sw_if_index;
/* Set packet input sw_if_index to unicast GTPU tunnel for learning */