ip: functional interface to ip fragmentation

This provides a functional interface to IP fragmentation,
allowing external features to fragment packets. It supports an
arbitrary encapsulation size, e.g. for MPLS or for inner fragmentation
of tunnels.

This also removes a dual loop in MAP that was fundamentally broken.

Type: fix
Signed-off-by: Ole Troan <ot@cisco.com>
Change-Id: Ia89ecec8ee3cbe2416edbe87630fdb714898c2a8
Signed-off-by: Ole Troan <ot@cisco.com>
This commit is contained in:
Ole Troan
2019-10-09 13:33:19 +02:00
committed by Neale Ranns
parent d318a996b7
commit eb284a1f8f
12 changed files with 476 additions and 412 deletions

View File

File diff suppressed because it is too large Load Diff

View File

@@ -168,7 +168,7 @@ ip4_map_t_icmp (vlib_main_t * vm,
if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
{
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP6_LOOKUP;
next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
}
err0:
@@ -287,7 +287,7 @@ ip4_map_t_fragmented (vlib_main_t * vm,
{
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.next_index =
IP6_FRAG_NEXT_IP6_LOOKUP;
IP_FRAG_NEXT_IP6_LOOKUP;
next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
}
}
@@ -453,7 +453,7 @@ ip4_map_t_tcp_udp (vlib_main_t * vm,
//Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.next_index =
IP6_FRAG_NEXT_IP6_LOOKUP;
IP_FRAG_NEXT_IP6_LOOKUP;
next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
}
}

View File

@@ -314,7 +314,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
vnet_buffer (p0)->ip_frag.flags = 0;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
}
@@ -346,7 +346,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
vnet_buffer (p1)->ip_frag.flags = 0;
vnet_buffer (p1)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
}
@@ -497,7 +497,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
vnet_buffer (p0)->ip_frag.flags = 0;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
}
@@ -622,7 +622,7 @@ ip6_map_post_ip4_reass (vlib_main_t * vm,
&& error0 == MAP_ERROR_NONE))
{
vnet_buffer (p0)->ip_frag.flags = 0;
vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
next0 = IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT;
}

View File

@@ -169,7 +169,7 @@ ip6_map_t_icmp (vlib_main_t * vm,
{
// Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
}
err0:
@@ -288,7 +288,7 @@ ip6_map_t_fragmented (vlib_main_t * vm,
// Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
IP_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
}
}
@@ -441,7 +441,7 @@ ip6_map_t_tcp_udp (vlib_main_t * vm,
// Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
IP_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
}
}

View File

@@ -140,7 +140,7 @@ class TestMAP(VppTestCase):
IP(src=self.pg0.remote_ip4, dst=self.pg0.remote_ip4) /
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 100))
rx = self.send_and_expect(self.pg0, v4*1, self.pg0)
rx = self.send_and_expect(self.pg0, v4 * 4, self.pg0)
v4_reply = v4[1]
v4_reply.ttl -= 1
for p in rx:
@@ -154,7 +154,7 @@ class TestMAP(VppTestCase):
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 100))
self.send_and_assert_encapped_one(v4, "3000::1", map_translated_addr)
self.send_and_assert_encapped(v4 * 4, "3000::1", map_translated_addr)
#
# Verify reordered fragments are able to pass as well
@@ -294,6 +294,76 @@ class TestMAP(VppTestCase):
pre_res_route.remove_vpp_config()
self.vapi.ppcli("map params pre-resolve del ip6-nh 4001::1")
def test_map_e_inner_frag(self):
""" MAP-E Inner fragmentation """
#
# Add a route to the MAP-BR
#
map_br_pfx = "2001::"
map_br_pfx_len = 32
map_route = VppIpRoute(self,
map_br_pfx,
map_br_pfx_len,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index)])
map_route.add_vpp_config()
#
# Add a domain that maps from pg0 to pg1
#
map_dst = '2001::/32'
map_src = '3000::1/128'
client_pfx = '192.168.0.0/16'
map_translated_addr = '2001:0:101:7000:0:c0a8:101:7'
tag = 'MAP-E tag.'
self.vapi.map_add_domain(ip4_prefix=client_pfx,
ip6_prefix=map_dst,
ip6_src=map_src,
ea_bits_len=20,
psid_offset=4,
psid_length=4,
mtu=1000,
tag=tag)
# Enable MAP on interface.
self.vapi.map_if_enable_disable(is_enable=1,
sw_if_index=self.pg0.sw_if_index,
is_translation=0)
# Enable inner fragmentation
self.vapi.map_param_set_fragmentation(inner=1)
v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(src=self.pg0.remote_ip4, dst='192.168.1.1') /
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 1300))
self.pg_send(self.pg0, v4*1)
rx = self.pg1.get_capture(2)
frags = fragment_rfc791(v4[1], 1000)
frags[0].id = 0
frags[1].id = 0
frags[0].ttl -= 1
frags[1].ttl -= 1
frags[0].chksum = 0
frags[1].chksum = 0
v6_reply1 = (IPv6(src='3000::1', dst=map_translated_addr, hlim=63) /
frags[0])
v6_reply2 = (IPv6(src='3000::1', dst=map_translated_addr, hlim=63) /
frags[1])
rx[0][1].fl = 0
rx[1][1].fl = 0
rx[0][1][IP].id = 0
rx[1][1][IP].id = 0
rx[0][1][IP].chksum = 0
rx[1][1][IP].chksum = 0
self.validate(rx[0][1], v6_reply1)
self.validate(rx[1][1], v6_reply2)
def validate(self, rx, expected):
self.assertEqual(rx, expected.__class__(scapy.compat.raw(expected)))

View File

@@ -2293,8 +2293,8 @@ typedef enum
always_inline void
ip4_mtu_check (vlib_buffer_t * b, u16 packet_len,
u16 adj_packet_bytes, bool df, u16 * next, u32 * error,
u8 is_midchain)
u16 adj_packet_bytes, bool df, u16 * next,
u8 is_midchain, u32 * error)
{
if (packet_len > adj_packet_bytes)
{
@@ -2312,8 +2312,8 @@ ip4_mtu_check (vlib_buffer_t * b, u16 packet_len,
/* IP fragmentation */
ip_frag_set_vnet_buffer (b, adj_packet_bytes,
(is_midchain ?
IP4_FRAG_NEXT_IP4_REWRITE_MIDCHAIN :
IP4_FRAG_NEXT_IP4_REWRITE), 0);
IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN :
IP_FRAG_NEXT_IP_REWRITE), 0);
*next = IP4_REWRITE_NEXT_FRAGMENT;
}
}
@@ -2486,12 +2486,12 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
adj0[0].rewrite_header.max_l3_packet_bytes,
ip0->flags_and_fragment_offset &
clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
next + 0, &error0, is_midchain);
next + 0, is_midchain, &error0);
ip4_mtu_check (b[1], ip1_len,
adj1[0].rewrite_header.max_l3_packet_bytes,
ip1->flags_and_fragment_offset &
clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
next + 1, &error1, is_midchain);
next + 1, is_midchain, &error1);
if (is_mcast)
{
@@ -2660,7 +2660,7 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
adj0[0].rewrite_header.max_l3_packet_bytes,
ip0->flags_and_fragment_offset &
clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
next + 0, &error0, is_midchain);
next + 0, is_midchain, &error0);
if (is_mcast)
{
@@ -2758,7 +2758,7 @@ ip4_rewrite_inline_with_gso (vlib_main_t * vm,
adj0[0].rewrite_header.max_l3_packet_bytes,
ip0->flags_and_fragment_offset &
clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
next + 0, &error0, is_midchain);
next + 0, is_midchain, &error0);
if (is_mcast)
{

View File

@@ -1652,7 +1652,7 @@ typedef enum
always_inline void
ip6_mtu_check (vlib_buffer_t * b, u16 packet_bytes,
u16 adj_packet_bytes, bool is_locally_generated,
u32 * next, u32 * error)
u32 * next, u8 is_midchain, u32 * error)
{
if (adj_packet_bytes >= 1280 && packet_bytes > adj_packet_bytes)
{
@@ -1660,7 +1660,9 @@ ip6_mtu_check (vlib_buffer_t * b, u16 packet_bytes,
{
/* IP fragmentation */
ip_frag_set_vnet_buffer (b, adj_packet_bytes,
IP6_FRAG_NEXT_IP6_REWRITE, 0);
(is_midchain ?
IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN :
IP_FRAG_NEXT_IP_REWRITE), 0);
*next = IP6_REWRITE_NEXT_FRAGMENT;
*error = IP6_ERROR_MTU_EXCEEDED;
}
@@ -1840,10 +1842,12 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
ip6_mtu_check (p0, ip0_len,
adj0[0].rewrite_header.max_l3_packet_bytes,
is_locally_originated0, &next0, &error0);
is_locally_originated0, &next0, is_midchain,
&error0);
ip6_mtu_check (p1, ip1_len,
adj1[0].rewrite_header.max_l3_packet_bytes,
is_locally_originated1, &next1, &error1);
is_locally_originated1, &next1, is_midchain,
&error1);
/* Don't adjust the buffer for hop count issue; icmp-error node
* wants to see the IP header */
@@ -2011,7 +2015,8 @@ ip6_rewrite_inline_with_gso (vlib_main_t * vm,
ip6_mtu_check (p0, ip0_len,
adj0[0].rewrite_header.max_l3_packet_bytes,
is_locally_originated0, &next0, &error0);
is_locally_originated0, &next0, is_midchain,
&error0);
/* Don't adjust the buffer for hop count issue; icmp-error node
* wants to see the IP header */

View File

File diff suppressed because it is too large Load Diff

View File

@@ -39,7 +39,6 @@
#define IP_FRAG_FLAG_IP4_HEADER 0x01 //Encapsulating IPv4 header
#define IP_FRAG_FLAG_IP6_HEADER 0x02 //Encapsulating IPv6 header
#define IP_FRAG_FLAG_MPLS_HEADER 0x04 //Encapsulating MPLS header
#define IP4_FRAG_NODE_NAME "ip4-frag"
#define IP6_FRAG_NODE_NAME "ip6-frag"
@@ -49,26 +48,14 @@ extern vlib_node_registration_t ip6_frag_node;
typedef enum
{
IP4_FRAG_NEXT_IP4_REWRITE,
IP4_FRAG_NEXT_IP4_REWRITE_MIDCHAIN,
IP4_FRAG_NEXT_IP4_LOOKUP,
IP4_FRAG_NEXT_IP6_LOOKUP,
IP4_FRAG_NEXT_MPLS_OUTPUT,
IP4_FRAG_NEXT_ICMP_ERROR,
IP4_FRAG_NEXT_DROP,
IP4_FRAG_N_NEXT
} ip4_frag_next_t;
typedef enum
{
IP6_FRAG_NEXT_IP4_LOOKUP,
IP6_FRAG_NEXT_IP6_LOOKUP,
IP6_FRAG_NEXT_IP6_REWRITE,
IP6_FRAG_NEXT_IP6_REWRITE_MIDCHAIN,
IP6_FRAG_NEXT_MPLS_OUTPUT,
IP6_FRAG_NEXT_DROP,
IP6_FRAG_N_NEXT
} ip6_frag_next_t;
IP_FRAG_NEXT_IP_REWRITE,
IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN,
IP_FRAG_NEXT_IP4_LOOKUP,
IP_FRAG_NEXT_IP6_LOOKUP,
IP_FRAG_NEXT_ICMP_ERROR,
IP_FRAG_NEXT_DROP,
IP_FRAG_N_NEXT
} ip_frag_next_t;
#define foreach_ip_frag_error \
/* Must be first. */ \
@@ -91,12 +78,16 @@ typedef enum
void ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 mtu,
u8 next_index, u8 flags);
void
ip4_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
ip_frag_error_t * error);
void
ip6_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
ip_frag_error_t * error);
extern ip_frag_error_t ip4_frag_do_fragment (vlib_main_t * vm,
u32 from_bi,
u16 mtu,
u16 encapsize, u32 ** buffer);
extern ip_frag_error_t ip6_frag_do_fragment (vlib_main_t * vm,
u32 from_bi,
u16 mtu,
u16 encapsize, u32 ** buffer);
#endif /* ifndef IP_FRAG_H */
/*

View File

@@ -34,8 +34,7 @@ typedef enum {
#define foreach_mpls_output_next \
_(DROP, "error-drop") \
_(IP4_FRAG, "ip4-frag") \
_(IP6_FRAG, "ip6-frag")
_(FRAG, "mpls-frag")
typedef enum {
#define _(s,n) MPLS_OUTPUT_NEXT_##s,
@@ -58,31 +57,6 @@ format_mpls_output_trace (u8 * s, va_list * args)
return s;
}
/*
* Save the mpls header length and adjust the current to ip header
*/
static inline u32
set_mpls_fragmentation(vlib_buffer_t * p0, ip_adjacency_t * adj0)
{
u32 next0;
/* advance size of (all) mpls header to ip header before fragmenting */
/* save the current pointing to first mpls header. */
vnet_buffer (p0)->mpls.mpls_hdr_length = vnet_buffer(p0)->l3_hdr_offset - p0->current_data;
vlib_buffer_advance (p0, vnet_buffer (p0)->mpls.mpls_hdr_length);
/* IP fragmentation */
ip_frag_set_vnet_buffer (p0, adj0[0].rewrite_header.max_l3_packet_bytes,
IP4_FRAG_NEXT_MPLS_OUTPUT,
((vnet_buffer (p0)->mpls.pyld_proto == DPO_PROTO_IP4) ? IP_FRAG_FLAG_IP4_HEADER:IP_FRAG_FLAG_IP6_HEADER));
/* Tell ip_frag to retain certain mpls parameters after fragmentation of mpls packet */
vnet_buffer (p0)->ip_frag.flags = (vnet_buffer (p0)->ip_frag.flags | IP_FRAG_FLAG_MPLS_HEADER);
next0 = (vnet_buffer (p0)->mpls.pyld_proto == DPO_PROTO_IP4)? MPLS_OUTPUT_NEXT_IP4_FRAG:MPLS_OUTPUT_NEXT_IP6_FRAG;
return next0;
}
static inline uword
mpls_output_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -196,7 +170,7 @@ mpls_output_inline (vlib_main_t * vm,
else
{
error0 = IP4_ERROR_MTU_EXCEEDED;
next0 = set_mpls_fragmentation (p0, adj0);
next0 = MPLS_OUTPUT_NEXT_FRAG;
vlib_node_increment_counter (vm, mpls_output_node.index,
MPLS_ERROR_PKTS_NEED_FRAG,
1);
@@ -219,7 +193,7 @@ mpls_output_inline (vlib_main_t * vm,
else
{
error1 = IP4_ERROR_MTU_EXCEEDED;
next1 = set_mpls_fragmentation (p1, adj1);
next1 = MPLS_OUTPUT_NEXT_FRAG;
vlib_node_increment_counter (vm, mpls_output_node.index,
MPLS_ERROR_PKTS_NEED_FRAG,
1);
@@ -308,7 +282,7 @@ mpls_output_inline (vlib_main_t * vm,
else
{
error0 = IP4_ERROR_MTU_EXCEEDED;
next0 = set_mpls_fragmentation (p0, adj0);
next0 = MPLS_OUTPUT_NEXT_FRAG;
vlib_node_increment_counter (vm, mpls_output_node.index,
MPLS_ERROR_PKTS_NEED_FRAG,
1);
@@ -371,11 +345,9 @@ VLIB_REGISTER_NODE (mpls_output_node) = {
.n_next_nodes = MPLS_OUTPUT_N_NEXT,
.next_nodes = {
#define _(s,n) [MPLS_OUTPUT_NEXT_##s] = n,
foreach_mpls_output_next
#undef _
},
[MPLS_OUTPUT_NEXT_DROP] = "mpls-drop",
[MPLS_OUTPUT_NEXT_FRAG] = "mpls-frag",
},
.format_trace = format_mpls_output_trace,
};
@@ -390,12 +362,184 @@ VLIB_REGISTER_NODE (mpls_midchain_node) = {
.name = "mpls-midchain",
.vector_size = sizeof (u32),
.format_trace = format_mpls_output_trace,
.n_errors = MPLS_N_ERROR,
.error_strings = mpls_error_strings,
.sibling_of = "mpls-output",
.format_trace = format_mpls_output_trace,
};
/**
static char *mpls_frag_error_strings[] = {
#define _(sym,string) string,
foreach_ip_frag_error
#undef _
};
typedef struct mpls_frag_trace_t_
{
u16 pkt_size;
u16 mtu;
} mpls_frag_trace_t;
typedef enum
{
MPLS_FRAG_NEXT_REWRITE,
MPLS_FRAG_NEXT_REWRITE_MIDCHAIN,
MPLS_FRAG_NEXT_ICMP_ERROR,
MPLS_FRAG_NEXT_DROP,
MPLS_FRAG_N_NEXT,
} mpls_frag_next_t;
static uword
mpls_frag (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
u32 n_left_from, next_index, * from, * to_next, n_left_to_next, *frags;
vlib_node_runtime_t * error_node;
error_node = vlib_node_get_runtime (vm, mpls_output_node.index);
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
frags = NULL;
while (n_left_from > 0)
{
vlib_get_next_frame (vm, node, next_index,
to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
{
ip_adjacency_t * adj0;
vlib_buffer_t * p0;
mpls_frag_next_t next0;
u32 pi0, adj_index0;
ip_frag_error_t error0 = IP_FRAG_ERROR_NONE;
i16 encap_size;
u8 is_ip4;
pi0 = to_next[0] = from[0];
p0 = vlib_get_buffer (vm, pi0);
from += 1;
n_left_from -= 1;
is_ip4 = vnet_buffer (p0)->mpls.pyld_proto == DPO_PROTO_IP4;
adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
adj0 = adj_get(adj_index0);
/* the size of the MPLS stack */
encap_size = vnet_buffer(p0)->l3_hdr_offset - p0->current_data;
/* IP fragmentation */
if (is_ip4)
error0 = ip4_frag_do_fragment (vm, pi0,
adj0->rewrite_header.max_l3_packet_bytes,
encap_size, &frags);
else
error0 = ip6_frag_do_fragment (vm, pi0,
adj0->rewrite_header.max_l3_packet_bytes,
encap_size, &frags);
if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
{
mpls_frag_trace_t *tr =
vlib_add_trace (vm, node, p0, sizeof (*tr));
tr->mtu = adj0->rewrite_header.max_l3_packet_bytes;
tr->pkt_size = vlib_buffer_length_in_chain(vm, p0);
}
if (PREDICT_TRUE(error0 == IP_FRAG_ERROR_NONE))
{
/* Free original buffer chain */
vlib_buffer_free_one (vm, pi0); /* Free original packet */
next0 = (IP_LOOKUP_NEXT_MIDCHAIN == adj0->lookup_next_index ?
MPLS_FRAG_NEXT_REWRITE_MIDCHAIN :
MPLS_FRAG_NEXT_REWRITE);
}
else if (is_ip4 && error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
{
icmp4_error_set_vnet_buffer (
p0, ICMP4_destination_unreachable,
ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
vnet_buffer (p0)->ip_frag.mtu);
next0 = MPLS_FRAG_NEXT_ICMP_ERROR;
}
else
{
vlib_error_count (vm, next_index, error0, 1);
vec_add1 (frags, pi0); /* Get rid of the original buffer */
next0 = MPLS_FRAG_NEXT_DROP;
}
/* Send fragments that were added in the frame */
u32 *frag_from, frag_left;
frag_from = frags;
frag_left = vec_len (frags);
while (frag_left > 0)
{
while (frag_left > 0 && n_left_to_next > 0)
{
u32 i;
i = to_next[0] = frag_from[0];
frag_from += 1;
frag_left -= 1;
to_next += 1;
n_left_to_next -= 1;
p0 = vlib_get_buffer (vm, i);
p0->error = error_node->errors[error0];
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, i,
next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
vlib_get_next_frame (vm, node, next_index, to_next,
n_left_to_next);
}
vec_reset_length (frags);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
vec_free (frags);
return frame->n_vectors;
}
static u8 *
format_mpls_frag_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
mpls_frag_trace_t *t = va_arg (*args, mpls_frag_trace_t *);
s = format (s, "mtu:%d pkt-size:%d", t->mtu, t->pkt_size);
return s;
}
VLIB_REGISTER_NODE (mpls_frag_node) = {
.function = mpls_frag,
.name = "mpls-frag",
.vector_size = sizeof (u32),
.format_trace = format_mpls_frag_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = IP_FRAG_N_ERROR,
.error_strings = mpls_frag_error_strings,
.n_next_nodes = MPLS_FRAG_N_NEXT,
.next_nodes = {
[MPLS_FRAG_NEXT_REWRITE] = "mpls-output",
[MPLS_FRAG_NEXT_REWRITE_MIDCHAIN] = "mpls-midchain",
[MPLS_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
[MPLS_FRAG_NEXT_DROP] = "mpls-drop"
},
};
/*
* @brief Next index values from the MPLS incomplete adj node
*/
#define foreach_mpls_adj_incomplete_next \

View File

@@ -154,7 +154,8 @@ class TestMPLS(VppTestCase):
pkts.append(p)
return pkts
def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64,
ip_dscp=0, payload_size=None):
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
@@ -166,6 +167,8 @@ class TestMPLS(VppTestCase):
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
if payload_size:
self.extend_packet(p, payload_size)
pkts.append(p)
return pkts
@@ -911,7 +914,7 @@ class TestMPLS(VppTestCase):
""" MPLS Tunnel Tests - Pipe """
#
# Create a tunnel with a single out label
# Create a tunnel with two out labels
#
mpls_tun = VppMPLSTunnelInterface(
self,
@@ -964,6 +967,38 @@ class TestMPLS(VppTestCase):
VppMplsLabel(46),
VppMplsLabel(33, ttl=255)])
#
# change tunnel's MTU to a low value
#
mpls_tun.set_l3_mtu(1200)
# send IP into the tunnel to be fragmented
tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
payload_size=1500)
rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
fake_tx = []
for p in tx:
fake_tx.append(p)
fake_tx.append(p)
self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
[VppMplsLabel(44),
VppMplsLabel(46)])
# send MPLS into the tunnel to be fragmented
tx = self.create_stream_ip4(self.pg0, "10.0.0.4",
payload_size=1500)
rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
fake_tx = []
for p in tx:
fake_tx.append(p)
fake_tx.append(p)
self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
[VppMplsLabel(44),
VppMplsLabel(46),
VppMplsLabel(33, ttl=255)])
def test_tunnel_uniform(self):
""" MPLS Tunnel Tests - Uniform """

View File

@@ -495,3 +495,15 @@ class VppInterface(object):
def get_tx_stats(self):
c = self.test.statistics.get_counter("^/if/tx$")
return c[0][self.sw_if_index]
def set_l3_mtu(self, mtu):
self.test.vapi.sw_interface_set_mtu(self.sw_if_index, [mtu, 0, 0, 0])
def set_ip4_mtu(self, mtu):
self.test.vapi.sw_interface_set_mtu(self.sw_if_index, [0, mtu, 0, 0])
def set_ip6_mtu(self, mtu):
self.test.vapi.sw_interface_set_mtu(self.sw_if_index, [0, 0, mtu, 0])
def set_mpls_mtu(self, mtu):
self.test.vapi.sw_interface_set_mtu(self.sw_if_index, [0, 0, 0, mtu])