MPLS Exp-null Tests

Add some 'make test' unit tests for MPLS explicit NULL label handling.
Fix the stacking of the MPLS load-balance result from the lookup onto the IPx lookup object.

Change-Id: I890d1221b8e3dea99bcc714ed9d0154a5f602c52
Signed-off-by: Neale Ranns <nranns@cisco.com>
This commit is contained in:
Neale Ranns
2016-11-01 10:05:08 +00:00
committed by Damjan Marion
parent 3b906b0d9b
commit 8fe8cc21d1
9 changed files with 289 additions and 48 deletions
+2 -1
View File
@@ -113,7 +113,8 @@ class VppTestCase(unittest.TestCase):
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.vpp_cmdline = [cls.vpp_bin, "unix", "nodaemon",
cls.vpp_cmdline = [cls.vpp_bin, "unix", "{", "nodaemon",
"cli-listen localhost:5002", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}"]
if cls.plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", cls.plugin_path])
+209
View File
@@ -0,0 +1,209 @@
#!/usr/bin/env python
import unittest
import socket
from logging import *
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, ARP
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import ICMPv6ND_NS, IPv6, UDP
from scapy.contrib.mpls import MPLS
class TestMPLS(VppTestCase):
    """ MPLS Test Case """

    @classmethod
    def setUpClass(cls):
        super(TestMPLS, cls).setUpClass()

    def setUp(self):
        super(TestMPLS, self).setUp()

        # create 3 pg interfaces and place each one in its own IPv4/IPv6
        # table (pg0 -> table 0, pg1 -> table 1, ...) so the tests can
        # verify that the lookup performed after an MPLS pop happens in
        # the VRF bound to the RX interface.
        self.create_pg_interfaces(range(3))

        table_id = 0
        for i in self.pg_interfaces:
            i.admin_up()
            i.set_table_ip4(table_id)
            i.set_table_ip6(table_id)
            i.config_ip4()
            i.config_ip6()
            i.enable_mpls()
            i.resolve_arp()
            i.resolve_ndp()
            table_id += 1

    def tearDown(self):
        super(TestMPLS, self).tearDown()

    def create_stream_ip4(self, src_if, mpls_label, mpls_ttl):
        """Return a list of MPLS-encapsulated IPv4/UDP packets.

        The inner IP destination is the peer address of *src_if*, so once
        the label is popped VPP routes the packet back out the same
        interface where it can be captured.

        :param src_if: pg interface the stream will be injected on
        :param mpls_label: label value for the single MPLS header
        :param mpls_ttl: TTL value for the MPLS header
        """
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 MPLS(label=mpls_label, ttl=mpls_ttl) /
                 IP(src=src_if.remote_ip4, dst=src_if.remote_ip4) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_ip6(self, src_if, mpls_label, mpls_ttl):
        """Return a list of MPLS-encapsulated IPv6/UDP packets.

        Same scheme as create_stream_ip4, but with an IPv6 payload.

        :param src_if: pg interface the stream will be injected on
        :param mpls_label: label value for the single MPLS header
        :param mpls_ttl: TTL value for the MPLS header
        """
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 MPLS(label=mpls_label, ttl=mpls_ttl) /
                 IPv6(src=src_if.remote_ip6, dst=src_if.remote_ip6) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def verify_capture_ip4(self, src_if, capture, sent):
        """Check each sent packet was received with the MPLS label popped
        and the IPv4 TTL decremented by one."""
        self.assertEqual(len(capture), len(sent))

        for tx, rx in zip(sent, capture):
            # the rx'd packet has the MPLS label popped,
            # so the ethertype is now IPv4
            eth = rx[Ether]
            self.assertEqual(eth.type, 0x800)

            tx_ip = tx[IP]
            rx_ip = rx[IP]

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing post pop has decremented the TTL
            self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

    def verify_capture_ip6(self, src_if, capture, sent):
        """Check each sent packet was received with the MPLS label popped
        and the IPv6 hop-limit decremented by one."""
        self.assertEqual(len(capture), len(sent))

        for tx, rx in zip(sent, capture):
            # the rx'd packet has the MPLS label popped,
            # so the ethertype is now IPv6
            eth = rx[Ether]
            self.assertEqual(eth.type, 0x86DD)

            tx_ip = tx[IPv6]
            rx_ip = rx[IPv6]

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing post pop has decremented the hop-limit
            self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)

    def test_v4_exp_null(self):
        """ MPLS V4 Explicit NULL test """

        #
        # The first test case has an MPLS TTL of 0
        # all packets should be dropped
        #
        tx = self.create_stream_ip4(self.pg0, 0, 0)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        try:
            self.assertEqual(0, len(rx))
        except AssertionError:
            # was `error(packet.show())` with `packet` undefined - the
            # NameError would mask the assertion failure; log the packets
            # that actually arrived instead.
            error("MPLS TTL=0 packets forwarded")
            for packet in rx:
                error(packet.show())
            raise

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, 0, 2)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg1, 0, 2)
        self.pg1.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx)

    def test_v6_exp_null(self):
        """ MPLS V6 Explicit NULL test """

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip6(self.pg0, 2, 2)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip6(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip6(self.pg1, 2, 2)
        self.pg1.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture()
        self.verify_capture_ip6(self.pg0, rx, tx)
# Run the MPLS test cases under VPP's test runner when executed directly.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
+17
View File
@@ -131,6 +131,18 @@ class VppInterface(object):
self.test.vapi.sw_interface_add_del_address(
self.sw_if_index, addr, addr_len, is_ipv6=1)
def set_table_ip4(self, table_id):
    """Set the interface in an IPv4 table.

    Must be called before configuring IP4 addresses.

    :param table_id: id of the IPv4 FIB table to bind the interface to
    """
    is_ipv6 = 0
    self.test.vapi.sw_interface_set_table(
        self.sw_if_index, is_ipv6, table_id)
def set_table_ip6(self, table_id):
    """Set the interface in an IPv6 table.

    Must be called before configuring IP6 addresses.

    :param table_id: id of the IPv6 FIB table to bind the interface to
    """
    is_ipv6 = 1
    self.test.vapi.sw_interface_set_table(
        self.sw_if_index, is_ipv6, table_id)
def disable_ipv6_ra(self):
    """Suppress sending of IPv6 router advertisements on this interface."""
    vapi = self.test.vapi
    vapi.sw_interface_ra_suppress(self.sw_if_index)
@@ -238,3 +250,8 @@ class VppInterface(object):
self.sub_if.append(sub_if)
else:
self.sub_if = sub_if
def enable_mpls(self):
    """Enable MPLS processing on the VPP interface."""
    vapi = self.test.vapi
    vapi.sw_interface_enable_disable_mpls(self.sw_if_index)
+23
View File
@@ -106,6 +106,18 @@ class VppPapiProvider(object):
args = (0, b'')
return self.api(vpp_papi.sw_interface_dump, args)
def sw_interface_set_table(self, sw_if_index, is_ipv6, table_id):
    """Set the IPvX table-id for an interface.

    :param sw_if_index: software index of the interface to bind
    :param is_ipv6: 1 to set the IPv6 table, 0 for the IPv4 table
    :param table_id: id of the FIB table to bind the interface to
    """
    args = (sw_if_index, is_ipv6, table_id)
    return self.api(vpp_papi.sw_interface_set_table, args)
def sw_interface_add_del_address(self, sw_if_index, addr, addr_len,
is_ipv6=0, is_add=1, del_all=0):
"""
@@ -121,6 +133,17 @@ class VppPapiProvider(object):
return self.api(vpp_papi.sw_interface_add_del_address,
(sw_if_index, is_add, is_ipv6, del_all, addr_len, addr))
def sw_interface_enable_disable_mpls(self, sw_if_index,
                                     is_enable=1):
    """Enable or disable MPLS processing on an interface.

    :param sw_if_index: software index of the interface
    :param is_enable: 1 to enable MPLS (default), 0 to disable
    """
    args = (sw_if_index, is_enable)
    return self.api(vpp_papi.sw_interface_set_mpls_enable, args)
def sw_interface_ra_suppress(self, sw_if_index):
suppress = 1
managed = 0
+26 -21
View File
@@ -65,15 +65,18 @@ static const char* const * const ** dpo_nodes;
/**
* @brief Vector of edge indices from parent DPO nodes to child
*
* dpo_edges[child_type][child_proto][parent_type] = edge_index
* dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
*
* This array is derived at init time from the dpo_nodes above. Note that
* the third dimension in dpo_nodes is lost, hence, the edge index from each
* node MUST be the same.
* Including both the child and parent protocol is required to support the
* case where it changes as the graph is traversed, most notably when an
* MPLS label is popped.
*
* Note that this array is child type specific, not child instance specific.
*/
static u32 ***dpo_edges;
static u32 ****dpo_edges;
/**
* @brief The DPO type value that can be assigned to the next dynamic
@@ -269,13 +272,15 @@ dpo_get_next_node (dpo_type_t child_type,
vec_validate(dpo_edges, child_type);
vec_validate(dpo_edges[child_type], child_proto);
vec_validate_init_empty(dpo_edges[child_type][child_proto],
parent_dpo->dpoi_type, ~0);
vec_validate(dpo_edges[child_type][child_proto], parent_type);
vec_validate_init_empty(
dpo_edges[child_type][child_proto][parent_type],
parent_proto, ~0);
/*
* if the edge index has not yet been created for this node to node transition
*/
if (~0 == dpo_edges[child_type][child_proto][parent_type])
if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
{
vlib_node_t *parent_node, *child_node;
vlib_main_t *vm;
@@ -288,45 +293,45 @@ dpo_get_next_node (dpo_type_t child_type,
ASSERT(NULL != dpo_nodes[parent_type]);
ASSERT(NULL != dpo_nodes[parent_type][parent_proto]);
pp = 0;
cc = 0;
/*
* create a graph arc from each of the parent's registered node types,
* to each of the childs.
*/
while (NULL != dpo_nodes[child_type][child_proto][pp])
while (NULL != dpo_nodes[child_type][child_proto][cc])
{
parent_node =
child_node =
vlib_get_node_by_name(vm,
(u8*) dpo_nodes[child_type][child_proto][pp]);
(u8*) dpo_nodes[child_type][child_proto][cc]);
cc = 0;
pp = 0;
while (NULL != dpo_nodes[parent_type][child_proto][cc])
while (NULL != dpo_nodes[parent_type][parent_proto][pp])
{
child_node =
parent_node =
vlib_get_node_by_name(vm,
(u8*) dpo_nodes[parent_type][parent_proto][cc]);
(u8*) dpo_nodes[parent_type][parent_proto][pp]);
edge = vlib_node_add_next(vm,
parent_node->index,
child_node->index);
child_node->index,
parent_node->index);
if (~0 == dpo_edges[child_type][child_proto][parent_type])
if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
{
dpo_edges[child_type][child_proto][parent_type] = edge;
dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
}
else
{
ASSERT(dpo_edges[child_type][child_proto][parent_type] == edge);
ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
}
cc++;
pp++;
}
pp++;
cc++;
}
}
return (dpo_edges[child_type][child_proto][parent_type]);
return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
}
/**
+3 -3
View File
@@ -685,10 +685,10 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
if (table_from_interface)
{
fib_index0 =
ip4_fib_table_get_index_for_sw_if_index(
ip6_fib_table_get_index_for_sw_if_index(
vnet_buffer(b0)->sw_if_index[VLIB_RX]);
fib_index1 =
ip4_fib_table_get_index_for_sw_if_index(
ip6_fib_table_get_index_for_sw_if_index(
vnet_buffer(b1)->sw_if_index[VLIB_RX]);
}
else
@@ -810,7 +810,7 @@ lookup_dpo_ip6_inline (vlib_main_t * vm,
if (table_from_interface)
{
fib_index0 =
ip4_fib_table_get_index_for_sw_if_index(
ip6_fib_table_get_index_for_sw_if_index(
vnet_buffer(b0)->sw_if_index[VLIB_RX]);
}
else
+6 -5
View File
@@ -82,11 +82,12 @@ format_mpls_label_dpo (u8 *s, va_list *args)
hdr.label_exp_s_ttl =
clib_net_to_host_u32(mld->mld_hdr.label_exp_s_ttl);
return (format(s, "mpls-label:[%d]:%U\n%U%U",
index,
format_mpls_header, hdr,
format_white_space, indent,
format_dpo_id, &mld->mld_dpo, indent+2));
s = format(s, "mpls-label:[%d]:", index);
s = format(s, "%U\n", format_mpls_header, hdr);
s = format(s, "%U", format_white_space, indent);
s = format(s, "%U", format_dpo_id, &mld->mld_dpo, indent+2);
return (s);
}
static void
+1 -16
View File
@@ -34,21 +34,6 @@ fib_entry_get_proto (const fib_entry_t * fib_entry)
return (fib_entry->fe_prefix.fp_proto);
}
static dpo_proto_t
fib_entry_get_payload_proto (const fib_entry_t * fib_entry)
{
switch (fib_entry->fe_prefix.fp_proto)
{
case FIB_PROTOCOL_IP4:
case FIB_PROTOCOL_IP6:
return fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto);
case FIB_PROTOCOL_MPLS:
return fib_entry->fe_prefix.fp_payload_proto;
}
return (fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto));
}
void
fib_entry_src_register (fib_source_t source,
const fib_entry_src_vft_t *vft)
@@ -344,7 +329,7 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry,
.fct = fct,
};
lb_proto = fib_entry_get_payload_proto(fib_entry);
lb_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto);
fib_path_list_walk(esrc->fes_pl,
fib_entry_src_collect_forwarding,
+2 -2
View File
@@ -4824,7 +4824,7 @@ fib_test_validate_lb_v (const load_balance_t *lb,
FIB_TEST_LB((exp->special.adj == dpo->dpoi_index),
"bucket %d stacks on drop %d",
bucket,
exp->adj.adj);
exp->special.adj);
break;
}
}
@@ -5489,7 +5489,7 @@ fib_test_label (void)
fib_test_lb_bucket_t bucket_drop = {
.type = FT_LB_SPECIAL,
.special = {
.adj = 1,
.adj = DPO_PROTO_IP4,
},
};