vxlan: Fixed checksum calculation offset

VXLAN uses csum_offload for IPv6 packets.

But without the gso node, the checksum is calculated only for the inner
packet.
This patch adds support for outer header csum calculation.
Checksum for inner packet should be calculated before
interface-output node (for example in vxlan node).

Type: fix

Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
Signed-off-by: Vladimir Isaev <visaev@netgate.com>
Change-Id: Ica68429ede4426293769207cd83c791ebe72fe56
This commit is contained in:
Vladimir Isaev
2020-05-21 16:34:17 +03:00
committed by John Lo
parent e362151804
commit 698eb87a8e
11 changed files with 86 additions and 79 deletions
+1 -1
View File
@@ -376,7 +376,7 @@ vnet_generic_outer_header_parser_inline (vlib_buffer_t * b0,
u16 ethertype = 0;
u16 l2hdr_sz = 0;
ASSERT (is_ip4 ^ is_ip6);
ASSERT (!(is_ip4 && is_ip6));
if (is_l2)
{
+5 -5
View File
@@ -344,22 +344,22 @@ vnet_interface_output_node_inline (vlib_main_t * vm,
vnet_calc_checksums_inline
(vm, b[0],
b[0]->flags & VNET_BUFFER_F_IS_IP4,
b[0]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
b[0]->flags & VNET_BUFFER_F_IS_IP6);
if (b[1]->flags & vnet_buffer_offload_flags)
vnet_calc_checksums_inline
(vm, b[1],
b[1]->flags & VNET_BUFFER_F_IS_IP4,
b[1]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
b[1]->flags & VNET_BUFFER_F_IS_IP6);
if (b[2]->flags & vnet_buffer_offload_flags)
vnet_calc_checksums_inline
(vm, b[2],
b[2]->flags & VNET_BUFFER_F_IS_IP4,
b[2]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
b[2]->flags & VNET_BUFFER_F_IS_IP6);
if (b[3]->flags & vnet_buffer_offload_flags)
vnet_calc_checksums_inline
(vm, b[3],
b[3]->flags & VNET_BUFFER_F_IS_IP4,
b[3]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
b[3]->flags & VNET_BUFFER_F_IS_IP6);
}
}
b += 4;
@@ -410,7 +410,7 @@ vnet_interface_output_node_inline (vlib_main_t * vm,
vnet_calc_checksums_inline
(vm, b[0],
b[0]->flags & VNET_BUFFER_F_IS_IP4,
b[0]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
b[0]->flags & VNET_BUFFER_F_IS_IP6);
}
b += 1;
}
+13 -43
View File
@@ -82,59 +82,29 @@ vnet_calc_ip6_checksums (vlib_main_t * vm, vlib_buffer_t * b,
static_always_inline void
vnet_calc_checksums_inline (vlib_main_t * vm, vlib_buffer_t * b,
int is_ip4, int is_ip6, int with_gso)
int is_ip4, int is_ip6)
{
ip4_header_t *ip4;
ip6_header_t *ip6;
tcp_header_t *th;
udp_header_t *uh;
if (with_gso)
ASSERT (!(is_ip4 && is_ip6));
ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
if (is_ip4)
{
generic_header_offset_t gho = { 0 };
vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4,
is_ip6);
ASSERT (gho.gho_flags ^ (GHO_F_IP4 | GHO_F_IP6));
vnet_get_inner_header (b, &gho);
ip4 = (ip4_header_t *)
(vlib_buffer_get_current (b) + gho.l3_hdr_offset);
ip6 = (ip6_header_t *)
(vlib_buffer_get_current (b) + gho.l3_hdr_offset);
th = (tcp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
uh = (udp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
if (gho.gho_flags & GHO_F_IP4)
{
vnet_calc_ip4_checksums (vm, b, ip4, th, uh);
}
else if (gho.gho_flags & GHO_F_IP6)
{
vnet_calc_ip6_checksums (vm, b, ip6, th, uh);
}
vnet_get_outer_header (b, &gho);
vnet_calc_ip4_checksums (vm, b, ip4, th, uh);
}
else
else if (is_ip6)
{
ASSERT (!(is_ip4 && is_ip6));
ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
if (is_ip4)
{
vnet_calc_ip4_checksums (vm, b, ip4, th, uh);
}
if (is_ip6)
{
vnet_calc_ip6_checksums (vm, b, ip6, th, uh);
}
vnet_calc_ip6_checksums (vm, b, ip6, th, uh);
}
b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+4 -8
View File
@@ -2241,8 +2241,7 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
next[0] = next_index;
if (is_midchain)
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
0 /* is_ip6 */ ,
0 /* with gso */ );
0 /* is_ip6 */ );
}
else
{
@@ -2267,8 +2266,7 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
next[1] = next_index;
if (is_midchain)
vnet_calc_checksums_inline (vm, b[1], 1 /* is_ip4 */ ,
0 /* is_ip6 */ ,
0 /* with gso */ );
0 /* is_ip6 */ );
}
else
{
@@ -2419,8 +2417,7 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
if (is_midchain)
{
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
0 /* is_ip6 */ ,
0 /* with gso */ );
0 /* is_ip6 */ );
/* Guess we are only writing on ipv4 header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t));
@@ -2527,8 +2524,7 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
{
/* this acts on the packet that is about to be encapped */
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
0 /* is_ip6 */ ,
0 /* with gso */ );
0 /* is_ip6 */ );
/* Guess we are only writing on ipv4 header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t));
+3 -6
View File
@@ -1936,11 +1936,9 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
/* before we paint on the next header, update the L4
* checksums if required, since there's no offload on a tunnel */
vnet_calc_checksums_inline (vm, p0, 0 /* is_ip4 */ ,
1 /* is_ip6 */ ,
0 /* with gso */ );
1 /* is_ip6 */ );
vnet_calc_checksums_inline (vm, p1, 0 /* is_ip4 */ ,
1 /* is_ip6 */ ,
0 /* with gso */ );
1 /* is_ip6 */ );
/* Guess we are only writing on ipv6 header. */
vnet_rewrite_two_headers (adj0[0], adj1[0],
@@ -2036,8 +2034,7 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
if (is_midchain)
{
vnet_calc_checksums_inline (vm, p0, 0 /* is_ip4 */ ,
1 /* is_ip6 */ ,
0 /* with gso */ );
1 /* is_ip6 */ );
/* Guess we are only writing on ip6 header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip6_header_t));
+19 -10
View File
@@ -1530,10 +1530,9 @@ pg_input_trace (pg_main_t * pg,
}
static_always_inline void
fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
u32 packet_data_size)
fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
int gso_enabled, u32 gso_size)
{
for (int i = 0; i < n_buffers; i++)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
@@ -1586,16 +1585,21 @@ fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
}
if (l4_proto == IP_PROTOCOL_TCP)
{
b0->flags |= (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_GSO);
b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
l4_hdr_sz = tcp_header_bytes (tcp);
tcp->checksum = 0;
vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
vnet_buffer2 (b0)->gso_size = packet_data_size;
if (gso_enabled)
{
b0->flags |= VNET_BUFFER_F_GSO;
l4_hdr_sz = tcp_header_bytes (tcp);
vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
vnet_buffer2 (b0)->gso_size = gso_size;
}
}
else if (l4_proto == IP_PROTOCOL_UDP)
{
@@ -1603,7 +1607,6 @@ fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
vnet_buffer2 (b0)->gso_l4_hdr_sz = sizeof (*udp);
udp->checksum = 0;
}
}
@@ -1700,8 +1703,14 @@ pg_generate_packets (vlib_node_runtime_t * node,
vnet_buffer (b)->feature_arc_index = feature_arc_index;
}
if (pi->gso_enabled)
fill_gso_buffer_flags (vm, to_next, n_this_frame, pi->gso_size);
if (pi->gso_enabled ||
(s->buffer_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
VNET_BUFFER_F_OFFLOAD_IP_CKSUM)))
{
fill_buffer_offload_flags (vm, to_next, n_this_frame,
pi->gso_enabled, pi->gso_size);
}
n_trace = vlib_get_trace_count (vm, node);
if (n_trace > 0)
+2 -2
View File
@@ -444,7 +444,7 @@ class TestGSO(VppTestCase):
self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
self.assert_udp_checksum_valid(rx)
self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
self.assertEqual(rx[VXLAN].vni, 10)
inner = rx[VXLAN].payload
self.assertEqual(rx[IPv6].plen - 8 - 8, len(inner))
@@ -474,7 +474,7 @@ class TestGSO(VppTestCase):
self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
self.assert_udp_checksum_valid(rx)
self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
self.assertEqual(rx[VXLAN].vni, 10)
inner = rx[VXLAN].payload
self.assertEqual(rx[IPv6].plen - 8 - 8, len(inner))
+2 -1
View File
@@ -80,7 +80,8 @@ class TestVxlan(BridgeDomain, VppTestCase):
# Verify UDP destination port is VXLAN 4789, source UDP port could be
# arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# TODO: checksum check
# Verify UDP checksum
self.assert_udp_checksum_valid(pkt)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
+33 -2
View File
@@ -6,7 +6,8 @@ from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.layers.inet6 import IPv6, UDP
from scapy.packet import Raw
from scapy.layers.inet6 import IP, IPv6, UDP
from scapy.layers.vxlan import VXLAN
import util
@@ -80,7 +81,8 @@ class TestVxlan6(BridgeDomain, VppTestCase):
# Verify UDP destination port is VXLAN 4789, source UDP port could be
# arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# TODO: checksum check
# Verify UDP checksum
self.assert_udp_checksum_valid(pkt, ignore_zero_checksum=False)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
@@ -190,6 +192,35 @@ class TestVxlan6(BridgeDomain, VppTestCase):
self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
self.logger.info(self.vapi.cli("show vxlan tunnel"))
def test_encap_fragmented_packet(self):
""" Encapsulation test send fragments from pg1
Verify receipt of encapsulated frames on pg0
"""
frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
IP(src='4.3.2.1', dst='1.2.3.4') /
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 1000))
frags = util.fragment_rfc791(frame, 400)
self.pg1.add_stream(frags)
self.pg0.enable_capture()
self.pg_start()
out = self.pg0.get_capture(3)
payload = []
for pkt in out:
payload.append(self.decapsulate(pkt))
self.check_encapsulation(pkt, self.single_tunnel_vni)
reassembled = util.reassemble4(payload)
self.assertEqual(Ether(raw(frame))[IP], reassembled[IP])
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
+2 -1
View File
@@ -80,7 +80,8 @@ class TestVxlanGbp(VppTestCase):
# Verify UDP destination port is VXLAN GBP 48879, source UDP port could
# be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# TODO: checksum check
# Verify UDP checksum
self.assert_udp_checksum_valid(pkt)
# Verify VNI
# pkt.show()
self.assertEqual(pkt[VXLAN].vni, vni)
+2
View File
@@ -80,6 +80,8 @@ class TestVxlanGpe(BridgeDomain, VppTestCase):
# Verify UDP destination port is VXLAN-GPE 4790, source UDP port
# could be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# Verify UDP checksum
self.assert_udp_checksum_valid(pkt)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)