Python test IP and MPLS objects conform to infra.

Add IP[46] MFIB dump.

Change-Id: I4a2821f65e67a5416b291e4912c84f64989883b8
Signed-off-by: Neale Ranns <nranns@cisco.com>
This commit is contained in:
Neale Ranns
2017-01-26 01:18:23 -08:00
committed by Florin Coras
parent 90c55724b5
commit 5a8123bda0
18 changed files with 756 additions and 212 deletions
+38
View File
@@ -16161,6 +16161,24 @@ api_ip_fib_dump (vat_main_t * vam)
return ret;
}
/*
 * VAT CLI handler: dump the IPv4 multicast FIB.
 * Sends an ip_mfib_dump request, then a control-ping; the ping's reply
 * marks the end of the details stream so W() can complete.
 * (M allocates+initialises a message, S sends it, W waits for the reply.)
 */
static int
api_ip_mfib_dump (vat_main_t * vam)
{
vl_api_ip_mfib_dump_t *mp;
vl_api_control_ping_t *mp_ping;
int ret;
M (IP_MFIB_DUMP, mp);
S (mp);
/* Use a control ping for synchronization */
M (CONTROL_PING, mp_ping);
S (mp_ping);
/* block until the control-ping reply sets ret */
W (ret);
return ret;
}
static void vl_api_ip_neighbor_details_t_handler
(vl_api_ip_neighbor_details_t * mp)
{
@@ -16353,6 +16371,24 @@ api_ip6_fib_dump (vat_main_t * vam)
return ret;
}
/*
 * VAT CLI handler: dump the IPv6 multicast FIB.
 * Mirrors api_ip_mfib_dump: request + control-ping for end-of-stream
 * synchronization.
 */
static int
api_ip6_mfib_dump (vat_main_t * vam)
{
vl_api_ip6_mfib_dump_t *mp;
vl_api_control_ping_t *mp_ping;
int ret;
M (IP6_MFIB_DUMP, mp);
S (mp);
/* Use a control ping for synchronization */
M (CONTROL_PING, mp_ping);
S (mp_ping);
/* block until the control-ping reply sets ret */
W (ret);
return ret;
}
int
api_classify_table_ids (vat_main_t * vam)
{
@@ -18385,7 +18421,9 @@ _(flow_classify_set_interface, \
"<intfc> | sw_if_index <nn> [ip4-table <nn>] [ip6-table <nn>] [del]") \
_(flow_classify_dump, "type [ip4|ip6]") \
_(ip_fib_dump, "") \
_(ip_mfib_dump, "") \
_(ip6_fib_dump, "") \
_(ip6_mfib_dump, "") \
_(feature_enable_disable, "arc_name <arc_name> " \
"feature_name <feature_name> <intfc> | sw_if_index <nn> [disable]") \
_(sw_interface_tag_add_del, "<intfc> | sw_if_index <nn> tag <text>" \
+1 -1
View File
@@ -1043,6 +1043,7 @@ fib_table_destroy (fib_table_t *fib_table)
break;
}
}
void
fib_table_walk (u32 fib_index,
fib_protocol_t proto,
@@ -1063,7 +1064,6 @@ fib_table_walk (u32 fib_index,
}
}
void
fib_table_unlock (u32 fib_index,
fib_protocol_t proto)
+56
View File
@@ -430,6 +430,62 @@ define ip_mroute_add_del_reply
i32 retval;
};
/** \brief Dump IP multicast fib table
@param client_index - opaque cookie to identify the sender
*/
define ip_mfib_dump
{
u32 client_index;
u32 context;
};
/** \brief IP Multicast FIB table response
@param table_id - IP fib table id
@param address_length - mask length
@param grp_address - Group address/prefix
@param src_address - Source address
@param count - the number of fib_path entries in path
@param path - array of fib_path structures
*/
manual_endian manual_print define ip_mfib_details
{
u32 context;
u32 table_id;
u8 address_length;
u8 grp_address[4];
u8 src_address[4];
u32 count;
vl_api_fib_path_t path[count];
};
/** \brief Dump IP6 multicast fib table
@param client_index - opaque cookie to identify the sender
*/
define ip6_mfib_dump
{
u32 client_index;
u32 context;
};
/** \brief IP6 Multicast FIB table response
@param table_id - IP fib table id
@param address_length - mask length
@param grp_address - Group address/prefix
@param src_address - Source address
@param count - the number of fib_path entries in path
@param path - array of fib_path structures
*/
manual_endian manual_print define ip6_mfib_details
{
u32 context;
u32 table_id;
u8 address_length;
u8 grp_address[16];
u8 src_address[16];
u32 count;
vl_api_fib_path_t path[count];
};
define ip_address_details
{
u32 client_index;
+252 -2
View File
@@ -33,9 +33,10 @@
#include <vnet/dpo/classify_dpo.h>
#include <vnet/dpo/ip_null_dpo.h>
#include <vnet/ethernet/arp_packet.h>
//#include <vnet/mfib/ip6_mfib.h>
#include <vnet/mfib/ip6_mfib.h>
#include <vnet/mfib/ip4_mfib.h>
#include <vnet/mfib/mfib_signal.h>
#include <vnet/mfib/mfib_entry.h>
#include <vnet/vnet_msg_enum.h>
@@ -55,14 +56,19 @@
#include <vlibapi/api_helper_macros.h>
#define foreach_ip_api_msg \
_(IP_FIB_DUMP, ip_fib_dump) \
_(IP_FIB_DETAILS, ip_fib_details) \
_(IP6_FIB_DUMP, ip6_fib_dump) \
_(IP6_FIB_DETAILS, ip6_fib_details) \
_(IP_MFIB_DUMP, ip_mfib_dump) \
_(IP_MFIB_DETAILS, ip_mfib_details) \
_(IP6_MFIB_DUMP, ip6_mfib_dump) \
_(IP6_MFIB_DETAILS, ip6_mfib_details) \
_(IP_NEIGHBOR_DUMP, ip_neighbor_dump) \
_(IP_MROUTE_ADD_DEL, ip_mroute_add_del) \
_(MFIB_SIGNAL_DUMP, mfib_signal_dump) \
_(MFIB_SIGNAL_DUMP, mfib_signal_dump) \
_(IP_NEIGHBOR_DETAILS, ip_neighbor_details) \
_(IP_ADDRESS_DUMP, ip_address_dump) \
_(IP_DUMP, ip_dump) \
@@ -463,6 +469,250 @@ vl_api_ip6_fib_dump_t_handler (vl_api_ip6_fib_dump_t * mp)
/* *INDENT-ON* */
}
/*
 * ip_mfib_details is a reply message that VPP sends; VPP should never
 * receive one. These stub handler/endian/print hooks satisfy the API
 * registration table and flag any such arrival as a bug.
 */
static void
vl_api_ip_mfib_details_t_handler (vl_api_ip_mfib_details_t * mp)
{
clib_warning ("BUG");
}
static void
vl_api_ip_mfib_details_t_endian (vl_api_ip_mfib_details_t * mp)
{
clib_warning ("BUG");
}
static void
vl_api_ip_mfib_details_t_print (vl_api_ip_mfib_details_t * mp)
{
clib_warning ("BUG");
}
/*
 * Send one ip_mfib_details reply describing a single IPv4 mcast entry.
 *
 * @param am         vpe API main (unused here; kept for sender symmetry)
 * @param q          client's shared-memory input queue
 * @param table_id   table the entry belongs to (host order on input)
 * @param pfx        the multicast prefix (group, source, mask length)
 * @param api_rpaths vector of encoded replication paths
 * @param context    client's context, echoed back verbatim
 */
static void
send_ip_mfib_details (vpe_api_main_t * am,
		      unix_shared_memory_queue_t * q,
		      u32 table_id,
		      mfib_prefix_t * pfx,
		      fib_route_path_encode_t * api_rpaths, u32 context)
{
  vl_api_ip_mfib_details_t *mp;
  fib_route_path_encode_t *api_rpath;
  vl_api_fib_path_t *fp;
  int path_count;

  path_count = vec_len (api_rpaths);
  /* variable-length message: fixed header + one fib_path per rpath */
  mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
  if (!mp)
    return;
  memset (mp, 0, sizeof (*mp));
  /* BUG FIX: was VL_API_IP_FIB_DETAILS (the unicast FIB details id),
   * which made clients decode this reply as ip_fib_details. */
  mp->_vl_msg_id = ntohs (VL_API_IP_MFIB_DETAILS);
  mp->context = context;

  mp->table_id = htonl (table_id);
  mp->address_length = pfx->fp_len;
  memcpy (mp->grp_address, &pfx->fp_grp_addr.ip4,
	  sizeof (pfx->fp_grp_addr.ip4));
  memcpy (mp->src_address, &pfx->fp_src_addr.ip4,
	  sizeof (pfx->fp_src_addr.ip4));

  mp->count = htonl (path_count);
  fp = mp->path;
  vec_foreach (api_rpath, api_rpaths)
  {
    memset (fp, 0, sizeof (*fp));
    fp->weight = 0;
    fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index);
    copy_fib_next_hop (api_rpath, fp);
    fp++;
  }

  vl_msg_api_send_shmem (q, (u8 *) & mp);
}
/* Walk context: accumulates the entry indices visited during a table walk
 * so they can be sorted and encoded after the walk completes. */
typedef struct vl_api_ip_mfib_dump_ctc_t_
{
fib_node_index_t *entries;
} vl_api_ip_mfib_dump_ctc_t;
/* mfib_table_walk_fn_t callback: collect each visited entry index. */
static int
vl_api_ip_mfib_table_dump_walk (fib_node_index_t fei, void *arg)
{
vl_api_ip_mfib_dump_ctc_t *ctx = arg;
vec_add1 (ctx->entries, fei);
return (0);
}
/*
 * Handle an ip_mfib_dump request: for every IPv4 mfib table, walk it to
 * collect entry indices, sort them, then send one ip_mfib_details reply
 * per entry. The collected vectors are reused (reset) across tables and
 * freed at the end.
 */
static void
vl_api_ip_mfib_dump_t_handler (vl_api_ip_mfib_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
unix_shared_memory_queue_t *q;
ip4_main_t *im = &ip4_main;
mfib_table_t *mfib_table;
fib_node_index_t *mfeip;
mfib_prefix_t pfx;
fib_route_path_encode_t *api_rpaths = NULL;
vl_api_ip_mfib_dump_ctc_t ctx = {
.entries = NULL,
};
/* resolve the requesting client's reply queue; bail if it vanished */
q = vl_api_client_index_to_input_queue (mp->client_index);
if (q == 0)
return;
/* *INDENT-OFF* */
pool_foreach (mfib_table, im->mfibs,
({
ip4_mfib_table_walk(&mfib_table->v4,
vl_api_ip_mfib_table_dump_walk,
&ctx);
vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort);
vec_foreach (mfeip, ctx.entries)
{
mfib_entry_get_prefix (*mfeip, &pfx);
mfib_entry_encode (*mfeip, &api_rpaths);
send_ip_mfib_details (am, q,
mfib_table->mft_table_id,
&pfx, api_rpaths,
mp->context);
}
vec_reset_length (api_rpaths);
vec_reset_length (ctx.entries);
}));
/* *INDENT-ON* */
vec_free (ctx.entries);
vec_free (api_rpaths);
}
/*
 * ip6_mfib_details is a reply message that VPP sends; VPP should never
 * receive one. These stub handler/endian/print hooks satisfy the API
 * registration table and flag any such arrival as a bug.
 */
static void
vl_api_ip6_mfib_details_t_handler (vl_api_ip6_mfib_details_t * mp)
{
clib_warning ("BUG");
}
static void
vl_api_ip6_mfib_details_t_endian (vl_api_ip6_mfib_details_t * mp)
{
clib_warning ("BUG");
}
static void
vl_api_ip6_mfib_details_t_print (vl_api_ip6_mfib_details_t * mp)
{
clib_warning ("BUG");
}
/*
 * Send one ip6_mfib_details reply describing a single IPv6 mcast entry.
 *
 * @param am         vpe API main (unused here; kept for sender symmetry)
 * @param q          client's shared-memory input queue
 * @param table_id   table the entry belongs to (host order on input)
 * @param pfx        the multicast prefix (group, source, mask length)
 * @param api_rpaths vector of encoded replication paths
 * @param context    client's context, echoed back verbatim
 */
static void
send_ip6_mfib_details (vpe_api_main_t * am,
		       unix_shared_memory_queue_t * q,
		       u32 table_id,
		       mfib_prefix_t * pfx,
		       fib_route_path_encode_t * api_rpaths, u32 context)
{
  vl_api_ip6_mfib_details_t *mp;
  fib_route_path_encode_t *api_rpath;
  vl_api_fib_path_t *fp;
  int path_count;

  path_count = vec_len (api_rpaths);
  /* variable-length message: fixed header + one fib_path per rpath */
  mp = vl_msg_api_alloc (sizeof (*mp) + path_count * sizeof (*fp));
  if (!mp)
    return;
  memset (mp, 0, sizeof (*mp));
  /* BUG FIX: was VL_API_IP6_FIB_DETAILS (the unicast FIB details id),
   * which made clients decode this reply as ip6_fib_details. */
  mp->_vl_msg_id = ntohs (VL_API_IP6_MFIB_DETAILS);
  mp->context = context;

  mp->table_id = htonl (table_id);
  mp->address_length = pfx->fp_len;
  memcpy (mp->grp_address, &pfx->fp_grp_addr.ip6,
	  sizeof (pfx->fp_grp_addr.ip6));
  memcpy (mp->src_address, &pfx->fp_src_addr.ip6,
	  sizeof (pfx->fp_src_addr.ip6));

  mp->count = htonl (path_count);
  fp = mp->path;
  vec_foreach (api_rpath, api_rpaths)
  {
    memset (fp, 0, sizeof (*fp));
    fp->weight = 0;
    fp->sw_if_index = htonl (api_rpath->rpath.frp_sw_if_index);
    copy_fib_next_hop (api_rpath, fp);
    fp++;
  }

  vl_msg_api_send_shmem (q, (u8 *) & mp);
}
/* Walk context: accumulates the entry indices visited during a table walk
 * so they can be sorted and encoded after the walk completes. */
typedef struct vl_api_ip6_mfib_dump_ctc_t_
{
fib_node_index_t *entries;
} vl_api_ip6_mfib_dump_ctc_t;
/* mfib_table_walk_fn_t callback: collect each visited entry index. */
static int
vl_api_ip6_mfib_table_dump_walk (fib_node_index_t fei, void *arg)
{
vl_api_ip6_mfib_dump_ctc_t *ctx = arg;
vec_add1 (ctx->entries, fei);
return (0);
}
/*
 * Handle an ip6_mfib_dump request: for every IPv6 mfib table, walk it to
 * collect entry indices, sort them, then send one ip6_mfib_details reply
 * per entry. The collected vectors are reused (reset) across tables and
 * freed at the end.
 */
static void
vl_api_ip6_mfib_dump_t_handler (vl_api_ip6_mfib_dump_t * mp)
{
vpe_api_main_t *am = &vpe_api_main;
unix_shared_memory_queue_t *q;
ip6_main_t *im = &ip6_main;
mfib_table_t *mfib_table;
fib_node_index_t *mfeip;
mfib_prefix_t pfx;
fib_route_path_encode_t *api_rpaths = NULL;
vl_api_ip6_mfib_dump_ctc_t ctx = {
.entries = NULL,
};
/* resolve the requesting client's reply queue; bail if it vanished */
q = vl_api_client_index_to_input_queue (mp->client_index);
if (q == 0)
return;
/* *INDENT-OFF* */
pool_foreach (mfib_table, im->mfibs,
({
ip6_mfib_table_walk(&mfib_table->v6,
vl_api_ip6_mfib_table_dump_walk,
&ctx);
vec_sort_with_function (ctx.entries, mfib_entry_cmp_for_sort);
vec_foreach(mfeip, ctx.entries)
{
mfib_entry_get_prefix (*mfeip, &pfx);
mfib_entry_encode (*mfeip, &api_rpaths);
send_ip6_mfib_details (am, q,
mfib_table->mft_table_id,
&pfx, api_rpaths,
mp->context);
}
vec_reset_length (api_rpaths);
vec_reset_length (ctx.entries);
}));
/* *INDENT-ON* */
vec_free (ctx.entries);
vec_free (api_rpaths);
}
static void
vl_api_ip_neighbor_add_del_t_handler (vl_api_ip_neighbor_add_del_t * mp,
vlib_main_t * vm)
+23
View File
@@ -279,6 +279,29 @@ ip4_mfib_table_entry_remove (ip4_mfib_t *mfib,
mfib->fib_entry_by_dst_address[len] = hash;
}
/*
 * Walk every entry in an IP4 mfib table.
 * Entries are bucketed by prefix length into one hash table per mask
 * length; visit each non-empty hash and invoke the callback with every
 * stored entry index.
 */
void
ip4_mfib_table_walk (ip4_mfib_t *mfib,
                     mfib_table_walk_fn_t fn,
                     void *ctx)
{
    int mask_len;

    for (mask_len = 0;
         mask_len < ARRAY_LEN (mfib->fib_entry_by_dst_address);
         mask_len++)
    {
        uword *hash_table = mfib->fib_entry_by_dst_address[mask_len];
        hash_pair_t *pair;

        if (NULL == hash_table)
            continue;

        hash_foreach_pair (pair, hash_table,
        ({
            fn (pair->value[0], ctx);
        }));
    }
}
static void
ip4_mfib_table_show_all (ip4_mfib_t *mfib,
vlib_main_t * vm)
+10 -1
View File
@@ -90,6 +90,15 @@ u32 ip4_mfib_index_from_table_id (u32 table_id)
extern u32 ip4_mfib_table_get_index_for_sw_if_index(u32 sw_if_index);
/**
* @brief Walk the IP4 mfib table.
*
* @param mfib the table to walk
* @param fn The function to invoke on each entry visited
* @param ctx A context passed in the visit function
*/
extern void ip4_mfib_table_walk (ip4_mfib_t *mfib,
mfib_table_walk_fn_t fn,
void *ctx);
#endif
+40 -10
View File
@@ -483,20 +483,16 @@ ip6_mfib_table_show_one (ip6_mfib_t *mfib,
}
typedef struct ip6_mfib_show_ctx_t_ {
u32 fib_index;
fib_node_index_t *entries;
} ip6_mfib_show_ctx_t;
static int
ip6_mfib_table_collect_entries (struct radix_node *rn, void *arg)
ip6_mfib_table_collect_entries (fib_node_index_t mfei, void *arg)
{
ip6_mfib_show_ctx_t *ctx = arg;
ip6_mfib_node_t *i6mn;
i6mn = (ip6_mfib_node_t*) rn;
vec_add1(ctx->entries, i6mn->i6mn_entry);
vec_add1(ctx->entries, mfei);
return (0);
}
@@ -507,13 +503,12 @@ ip6_mfib_table_show_all (ip6_mfib_t *mfib,
{
fib_node_index_t *mfib_entry_index;
ip6_mfib_show_ctx_t ctx = {
.fib_index = mfib->index,
.entries = NULL,
};
rn_walktree(mfib->rhead,
ip6_mfib_table_collect_entries,
&ctx);
ip6_mfib_table_walk(mfib,
ip6_mfib_table_collect_entries,
&ctx);
vec_sort_with_function(ctx.entries, mfib_entry_cmp_for_sort);
@@ -528,6 +523,41 @@ ip6_mfib_table_show_all (ip6_mfib_t *mfib,
vec_free(ctx.entries);
}
typedef struct ip6_mfib_radix_walk_ctx_t_
{
mfib_table_walk_fn_t user_fn;
void *user_ctx;
} ip6_mfib_radix_walk_ctx_t;
/*
 * rn_walktree callback: translate a radix-tree node into the mfib entry
 * index it carries and hand it to the user's walk function.
 * NOTE(review): the cast assumes the radix_node is embedded at the start
 * of ip6_mfib_node_t -- confirm against the struct definition.
 */
static int
ip6_mfib_table_radix_walk (struct radix_node *rn,
void *arg)
{
ip6_mfib_radix_walk_ctx_t *ctx = arg;
ip6_mfib_node_t *i6mn;
i6mn = (ip6_mfib_node_t*) rn;
ctx->user_fn(i6mn->i6mn_entry, ctx->user_ctx);
return (0);
}
/*
 * Walk every entry in an IP6 mfib table by walking the underlying radix
 * tree; ip6_mfib_table_radix_walk adapts each radix node to the caller's
 * mfib_table_walk_fn_t callback.
 */
void
ip6_mfib_table_walk (ip6_mfib_t *mfib,
                     mfib_table_walk_fn_t fn,
                     void *ctx)
{
    ip6_mfib_radix_walk_ctx_t walk_ctx;

    walk_ctx.user_fn = fn;
    walk_ctx.user_ctx = ctx;

    rn_walktree (mfib->rhead,
                 ip6_mfib_table_radix_walk,
                 &walk_ctx);
}
static clib_error_t *
ip6_show_mfib (vlib_main_t * vm,
unformat_input_t * input,
+11
View File
@@ -105,5 +105,16 @@ extern fib_node_index_t ip6_mfib_table_lookup2(const ip6_mfib_t *mfib,
const ip6_address_t *src,
const ip6_address_t *grp);
/**
* @brief Walk the IP6 mfib table.
*
* @param mfib the table to walk
* @param fn The function to invoke on each entry visited
* @param ctx A context passed in the visit function
*/
extern void ip6_mfib_table_walk (ip6_mfib_t *mfib,
mfib_table_walk_fn_t fn,
void *ctx);
#endif
+7 -1
View File
@@ -1050,9 +1050,15 @@ mfib_entry_encode (fib_node_index_t mfib_entry_index,
mfib_entry_t *mfib_entry;
mfib_entry = mfib_entry_get(mfib_entry_index);
fib_path_list_walk(mfib_entry->mfe_parent, fib_path_encode, api_rpaths);
if (FIB_NODE_INDEX_INVALID != mfib_entry->mfe_parent)
{
fib_path_list_walk(mfib_entry->mfe_parent,
fib_path_encode,
api_rpaths);
}
}
void
mfib_entry_get_prefix (fib_node_index_t mfib_entry_index,
mfib_prefix_t *pfx)
+3
View File
@@ -130,6 +130,9 @@ extern void mfib_entry_contribute_forwarding(
fib_forward_chain_type_t type,
dpo_id_t *dpo);
extern void mfib_entry_encode(fib_node_index_t fib_entry_index,
fib_route_path_encode_t **api_rpaths);
extern void mfib_entry_module_init(void);
+19
View File
@@ -489,6 +489,25 @@ mfib_table_lock (u32 fib_index,
mfib_table->mft_locks++;
}
/*
 * Walk all entries in the mfib table identified by index and protocol,
 * dispatching to the per-protocol table walker. MPLS has no multicast
 * FIB, so that protocol is a no-op.
 */
void
mfib_table_walk (u32 fib_index,
                 fib_protocol_t proto,
                 mfib_table_walk_fn_t fn,
                 void *ctx)
{
    if (FIB_PROTOCOL_IP4 == proto)
    {
        ip4_mfib_table_walk (ip4_mfib_get (fib_index), fn, ctx);
    }
    else if (FIB_PROTOCOL_IP6 == proto)
    {
        ip6_mfib_table_walk (ip6_mfib_get (fib_index), fn, ctx);
    }
    /* FIB_PROTOCOL_MPLS: no multicast FIB - nothing to do */
}
u8*
format_mfib_table_name (u8* s, va_list ap)
{
+16
View File
@@ -360,4 +360,20 @@ extern u32 mfib_table_get_num_entries(u32 fib_index,
extern mfib_table_t *mfib_table_get(fib_node_index_t index,
fib_protocol_t proto);
/**
* @brief Call back function when walking entries in a FIB table
*/
typedef int (*mfib_table_walk_fn_t)(fib_node_index_t fei,
void *ctx);
/**
* @brief Walk all entries in a FIB table
* N.B.: This is NOT safe with respect to deletes. If you need to delete,
* walk the whole table collecting the entries into a vector, then delete
* the elements from the vector.
*/
extern void mfib_table_walk(u32 fib_index,
fib_protocol_t proto,
mfib_table_walk_fn_t fn,
void *ctx);
#endif
+19 -17
View File
@@ -6,7 +6,7 @@ from logging import *
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppDot1QSubint
from vpp_gre_interface import VppGreInterface
from vpp_ip_route import IpRoute, RoutePath
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_papi_provider import L2_VTR_OP
from scapy.packet import Raw
@@ -298,8 +298,9 @@ class TestGRE(VppTestCase):
gre_if.admin_up()
gre_if.config_ip4()
route_via_tun = IpRoute(self, "4.4.4.4", 32,
[RoutePath("0.0.0.0", gre_if.sw_if_index)])
route_via_tun = VppIpRoute(self, "4.4.4.4", 32,
[VppRoutePath("0.0.0.0",
gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
@@ -321,9 +322,9 @@ class TestGRE(VppTestCase):
#
# Add a route that resolves the tunnel's destination
#
route_tun_dst = IpRoute(self, "1.1.1.2", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun_dst = VppIpRoute(self, "1.1.1.2", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun_dst.add_vpp_config()
#
@@ -453,17 +454,18 @@ class TestGRE(VppTestCase):
#
# Add a route via the tunnel - in the overlay
#
route_via_tun = IpRoute(self, "9.9.9.9", 32,
[RoutePath("0.0.0.0", gre_if.sw_if_index)])
route_via_tun = VppIpRoute(self, "9.9.9.9", 32,
[VppRoutePath("0.0.0.0",
gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
#
# Add a route that resolves the tunnel's destination - in the
# underlay table
#
route_tun_dst = IpRoute(self, "2.2.2.2", 32, table_id=1,
paths=[RoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_tun_dst = VppIpRoute(self, "2.2.2.2", 32, table_id=1,
paths=[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_tun_dst.add_vpp_config()
#
@@ -514,12 +516,12 @@ class TestGRE(VppTestCase):
#
# Add routes to resolve the tunnel destinations
#
route_tun1_dst = IpRoute(self, "2.2.2.2", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun2_dst = IpRoute(self, "2.2.2.3", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun1_dst = VppIpRoute(self, "2.2.2.2", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun2_dst = VppIpRoute(self, "2.2.2.3", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun1_dst.add_vpp_config()
route_tun2_dst.add_vpp_config()
+85 -70
View File
@@ -4,7 +4,7 @@ import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_ip_route import IpMRoute, MRoutePath, MFibSignal
from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -38,6 +38,21 @@ class MRouteEntryFlags:
N_PKTS_IN_STREAM = 90
class TestMFIB(VppTestCase):
""" MFIB Test Case """
def setUp(self):
super(TestMFIB, self).setUp()
def test_mfib(self):
""" MFIB Unit Tests """
error = self.vapi.cli("test mfib")
if error:
self.logger.critical(error)
self.assertEqual(error.find("Failed"), -1)
class TestIPMcast(VppTestCase):
""" IP Multicast Test Case """
@@ -163,51 +178,51 @@ class TestIPMcast(VppTestCase):
# A (*,G).
# one accepting interface, pg0, 3 forwarding interfaces
#
route_232_1_1_1 = IpMRoute(
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
MRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
MRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
#
# An (S,G).
# one accepting interface, pg0, 2 forwarding interfaces
#
route_1_1_1_1_232_1_1_1 = IpMRoute(
route_1_1_1_1_232_1_1_1 = VppIpMRoute(
self,
"1.1.1.1",
"232.1.1.1", 64,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
MRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_1_1_1_1_232_1_1_1.add_vpp_config()
#
# An (*,G/m).
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232 = IpMRoute(
route_232 = VppIpMRoute(
self,
"0.0.0.0",
"232.0.0.0", 8,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232.add_vpp_config()
#
@@ -296,19 +311,19 @@ class TestIPMcast(VppTestCase):
# A (*,G).
# one accepting interface, pg0, 3 forwarding interfaces
#
route_ff01_1 = IpMRoute(
route_ff01_1 = VppIpMRoute(
self,
"::",
"ff01::1", 128,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
MRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
MRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff01_1.add_vpp_config()
@@ -316,17 +331,17 @@ class TestIPMcast(VppTestCase):
# An (S,G).
# one accepting interface, pg0, 2 forwarding interfaces
#
route_2001_ff01_1 = IpMRoute(
route_2001_ff01_1 = VppIpMRoute(
self,
"2001::1",
"ff01::1", 256,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
MRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_2001_ff01_1.add_vpp_config()
@@ -334,15 +349,15 @@ class TestIPMcast(VppTestCase):
# An (*,G/m).
# one accepting interface, pg0, 1 forwarding interface
#
route_ff01 = IpMRoute(
route_ff01 = VppIpMRoute(
self,
"::",
"ff01::", 16,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff01.add_vpp_config()
@@ -432,15 +447,15 @@ class TestIPMcast(VppTestCase):
# A (*,G).
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232_1_1_1 = IpMRoute(
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
route_232_1_1_1.update_entry_flags(
@@ -454,10 +469,10 @@ class TestIPMcast(VppTestCase):
#
# Construct a representation of the signal we expect on pg0
#
signal_232_1_1_1_itf_0 = MFibSignal(self,
route_232_1_1_1,
self.pg0.sw_if_index,
tx[0])
signal_232_1_1_1_itf_0 = VppMFibSignal(self,
route_232_1_1_1,
self.pg0.sw_if_index,
tx[0])
#
# read the only expected signal
@@ -482,15 +497,15 @@ class TestIPMcast(VppTestCase):
# A Second entry with connected check
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232_1_1_2 = IpMRoute(
route_232_1_1_2 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.2", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_2.add_vpp_config()
route_232_1_1_2.update_entry_flags(
@@ -499,10 +514,10 @@ class TestIPMcast(VppTestCase):
#
# Send traffic to both entries. One read should net us two signals
#
signal_232_1_1_2_itf_0 = MFibSignal(self,
route_232_1_1_2,
self.pg0.sw_if_index,
tx[0])
signal_232_1_1_2_itf_0 = VppMFibSignal(self,
route_232_1_1_2,
self.pg0.sw_if_index,
tx[0])
tx = self._mcast_connected_send_stream("232.1.1.1")
tx2 = self._mcast_connected_send_stream("232.1.1.2")
@@ -526,15 +541,15 @@ class TestIPMcast(VppTestCase):
# A (*,G).
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232_1_1_1 = IpMRoute(
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[MRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
MRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
route_232_1_1_1.update_entry_flags(
@@ -548,10 +563,10 @@ class TestIPMcast(VppTestCase):
#
# Construct a representation of the signal we expect on pg0
#
signal_232_1_1_1_itf_0 = MFibSignal(self,
route_232_1_1_1,
self.pg0.sw_if_index,
tx[0])
signal_232_1_1_1_itf_0 = VppMFibSignal(self,
route_232_1_1_1,
self.pg0.sw_if_index,
tx[0])
#
# read the only expected signal
-23
View File
@@ -1,23 +0,0 @@
#!/usr/bin/env python
import unittest
from framework import VppTestCase, VppTestRunner
class TestMFIB(VppTestCase):
""" MFIB Test Case """
def setUp(self):
super(TestMFIB, self).setUp()
def test_mfib(self):
""" MFIB Unit Tests """
error = self.vapi.cli("test mfib")
if error:
self.logger.critical(error)
self.assertEqual(error.find("Failed"), -1)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
+66 -75
View File
@@ -4,7 +4,8 @@ import unittest
import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import IpRoute, RoutePath, MplsRoute, MplsIpBind
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind
from scapy.packet import Raw
from scapy.layers.l2 import Ether
@@ -258,10 +259,10 @@ class TestMPLS(VppTestCase):
#
# A simple MPLS xconnect - eos label in label out
#
route_32_eos = MplsRoute(self, 32, 1,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_eos = VppMplsRoute(self, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_eos.add_vpp_config()
#
@@ -281,10 +282,10 @@ class TestMPLS(VppTestCase):
#
# A simple MPLS xconnect - non-eos label in label out
#
route_32_neos = MplsRoute(self, 32, 0,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_neos = VppMplsRoute(self, 32, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_neos.add_vpp_config()
#
@@ -304,10 +305,10 @@ class TestMPLS(VppTestCase):
#
# An MPLS xconnect - EOS label in IP out
#
route_33_eos = MplsRoute(self, 33, 1,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_eos = VppMplsRoute(self, 33, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_eos.add_vpp_config()
self.vapi.cli("clear trace")
@@ -324,10 +325,10 @@ class TestMPLS(VppTestCase):
# An MPLS xconnect - non-EOS label in IP out - an invalid configuration
# so this traffic should be dropped.
#
route_33_neos = MplsRoute(self, 33, 0,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_neos = VppMplsRoute(self, 33, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_neos.add_vpp_config()
self.vapi.cli("clear trace")
@@ -342,11 +343,11 @@ class TestMPLS(VppTestCase):
#
# A recursive EOS x-connect, which resolves through another x-connect
#
route_34_eos = MplsRoute(self, 34, 1,
[RoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[44, 45])])
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[44, 45])])
route_34_eos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0, [34])
@@ -362,11 +363,11 @@ class TestMPLS(VppTestCase):
# A recursive non-EOS x-connect, which resolves through another
# x-connect
#
route_34_neos = MplsRoute(self, 34, 0,
[RoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[44, 46])])
route_34_neos = VppMplsRoute(self, 34, 0,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[44, 46])])
route_34_neos.add_vpp_config()
self.vapi.cli("clear trace")
@@ -384,11 +385,11 @@ class TestMPLS(VppTestCase):
# an recursive IP route that resolves through the recursive non-eos
# x-connect
#
ip_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
[RoutePath("0.0.0.0",
0xffffffff,
nh_via_label=34,
labels=[55])])
ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=34,
labels=[55])])
ip_10_0_0_1.add_vpp_config()
self.vapi.cli("clear trace")
@@ -415,14 +416,14 @@ class TestMPLS(VppTestCase):
#
# Add a non-recursive route with a single out label
#
route_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[45])])
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[45])])
route_10_0_0_1.add_vpp_config()
# bind a local label to the route
binding = MplsIpBind(self, 44, "10.0.0.1", 32)
binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
binding.add_vpp_config()
# non-EOS stream
@@ -470,10 +471,10 @@ class TestMPLS(VppTestCase):
#
# Add a non-recursive route with a single out label
#
route_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[32])])
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[32])])
route_10_0_0_1.add_vpp_config()
#
@@ -493,10 +494,10 @@ class TestMPLS(VppTestCase):
#
# Add a non-recursive route with a 3 out labels
#
route_10_0_0_2 = IpRoute(self, "10.0.0.2", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[32, 33, 34])])
route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[32, 33, 34])])
route_10_0_0_2.add_vpp_config()
#
@@ -516,10 +517,10 @@ class TestMPLS(VppTestCase):
#
# add a recursive path, with output label, via the 1 label route
#
route_11_0_0_1 = IpRoute(self, "11.0.0.1", 32,
[RoutePath("10.0.0.1",
0xffffffff,
labels=[44])])
route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
[VppRoutePath("10.0.0.1",
0xffffffff,
labels=[44])])
route_11_0_0_1.add_vpp_config()
#
@@ -539,10 +540,10 @@ class TestMPLS(VppTestCase):
#
# add a recursive path, with 2 labels, via the 3 label route
#
route_11_0_0_2 = IpRoute(self, "11.0.0.2", 32,
[RoutePath("10.0.0.2",
0xffffffff,
labels=[44, 45])])
route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
[VppRoutePath("10.0.0.2",
0xffffffff,
labels=[44, 45])])
route_11_0_0_2.add_vpp_config()
#
@@ -590,20 +591,10 @@ class TestMPLS(VppTestCase):
#
# add an unlabelled route through the new tunnel
#
dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.3")
nh_addr = socket.inet_pton(socket.AF_INET, "0.0.0.0")
dest_addr_len = 32
self.vapi.ip_add_del_route(
dest_addr,
dest_addr_len,
nh_addr, # all zeros next-hop - tunnel is p2p
reply.sw_if_index, # sw_if_index of the new tunnel
0, # table-id
0, # next-hop-table-id
1, # next-hop-weight
0, # num-out-labels,
[]) # out-label
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath("0.0.0.0",
reply.sw_if_index)])
route_10_0_0_3.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
@@ -696,10 +687,10 @@ class TestMPLS(VppTestCase):
#
# A de-agg route - next-hop lookup in default table
#
route_34_eos = MplsRoute(self, 34, 1,
[RoutePath("0.0.0.0",
0xffffffff,
nh_table_id=0)])
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=0)])
route_34_eos.add_vpp_config()
#
@@ -720,10 +711,10 @@ class TestMPLS(VppTestCase):
#
# A de-agg route - next-hop lookup in non-default table
#
route_35_eos = MplsRoute(self, 35, 1,
[RoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
route_35_eos = VppMplsRoute(self, 35, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
route_35_eos.add_vpp_config()
#
+104 -12
View File
@@ -5,13 +5,14 @@
"""
import socket
from vpp_object import *
# from vnet/vnet/mpls/mpls_types.h
MPLS_IETF_MAX_LABEL = 0xfffff
MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
class RoutePath(object):
class VppRoutePath(object):
def __init__(
self,
@@ -31,15 +32,15 @@ class RoutePath(object):
self.nh_addr = socket.inet_pton(socket.AF_INET, nh_addr)
class MRoutePath(RoutePath):
class VppMRoutePath(VppRoutePath):
def __init__(self, nh_sw_if_index, flags):
super(MRoutePath, self).__init__("0.0.0.0",
nh_sw_if_index)
super(VppMRoutePath, self).__init__("0.0.0.0",
nh_sw_if_index)
self.nh_i_flags = flags
class IpRoute:
class VppIpRoute(VppObject):
"""
IP Route
"""
@@ -80,6 +81,7 @@ class IpRoute:
path.nh_labels),
next_hop_via_label=path.nh_via_label,
is_ipv6=self.is_ip6)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
if self.is_local:
@@ -101,8 +103,26 @@ class IpRoute:
table_id=self.table_id,
is_add=0)
def query_vpp_config(self):
dump = self._test.vapi.ip_fib_dump()
for e in dump:
if self.dest_addr == e.address \
and self.dest_addr_len == e.address_length \
and self.table_id == e.table_id:
return True
return False
class IpMRoute:
def __str__(self):
return self.object_id()
def object_id(self):
return ("%d:%s/%d"
% (self.table_id,
socket.inet_ntop(socket.AF_INET, self.dest_addr),
self.dest_addr_len))
class VppIpMRoute(VppObject):
"""
IP Multicast Route
"""
@@ -133,6 +153,7 @@ class IpMRoute:
path.nh_i_flags,
table_id=self.table_id,
is_ipv6=self.is_ip6)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
for path in self.paths:
@@ -171,8 +192,34 @@ class IpMRoute:
table_id=self.table_id,
is_ipv6=self.is_ip6)
def query_vpp_config(self):
dump = self._test.vapi.ip_fib_dump()
for e in dump:
if self.grp_addr == e.address \
and self.grp_addr_len == e.address_length \
and self.table_id == e.table_id:
return True
return False
class MFibSignal:
def __str__(self):
return self.object_id()
def object_id(self):
if self.is_ip6:
return ("%d:(%s,%s/%d)"
% (self.table_id,
socket.inet_ntop(socket.AF_INET6, self.src_addr),
socket.inet_ntop(socket.AF_INET6, self.grp_addr),
self.grp_addr_len))
else:
return ("%d:(%s,%s/%d)"
% (self.table_id,
socket.inet_ntop(socket.AF_INET, self.src_addr),
socket.inet_ntop(socket.AF_INET, self.grp_addr),
self.grp_addr_len))
class VppMFibSignal(object):
def __init__(self, test, route, interface, packet):
self.route = route
self.interface = interface
@@ -193,21 +240,27 @@ class MFibSignal:
signal.src_address[i])
class MplsIpBind:
class VppMplsIpBind(VppObject):
"""
MPLS to IP Binding
"""
def __init__(self, test, local_label, dest_addr, dest_addr_len):
def __init__(self, test, local_label, dest_addr, dest_addr_len,
table_id=0, ip_table_id=0):
self._test = test
self.dest_addr = socket.inet_pton(socket.AF_INET, dest_addr)
self.dest_addr_len = dest_addr_len
self.local_label = local_label
self.table_id = table_id
self.ip_table_id = ip_table_id
def add_vpp_config(self):
self._test.vapi.mpls_ip_bind_unbind(self.local_label,
self.dest_addr,
self.dest_addr_len)
self.dest_addr_len,
table_id=self.table_id,
ip_table_id=self.ip_table_id)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.mpls_ip_bind_unbind(self.local_label,
@@ -215,10 +268,30 @@ class MplsIpBind:
self.dest_addr_len,
is_bind=0)
def query_vpp_config(self):
dump = self._test.vapi.mpls_fib_dump()
for e in dump:
if self.local_label == e.label \
and self.eos_bit == e.eos_bit \
and self.table_id == e.table_id:
return True
return False
class MplsRoute:
def __str__(self):
return self.object_id()
def object_id(self):
return ("%d:%s binds %d:%s/%d"
% (self.table_id,
self.local_label,
self.ip_table_id,
socket.inet_ntop(socket.AF_INET, self.dest_addr),
self.dest_addr_len))
class VppMplsRoute(VppObject):
"""
MPLS Route
MPLS Route/LSP
"""
def __init__(self, test, local_label, eos_bit, paths, table_id=0):
@@ -242,6 +315,7 @@ class MplsRoute:
path.nh_labels),
next_hop_via_label=path.nh_via_label,
next_hop_table_id=path.nh_table_id)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
for path in self.paths:
@@ -252,3 +326,21 @@ class MplsRoute:
path.nh_itf,
table_id=self.table_id,
is_add=0)
def query_vpp_config(self):
dump = self._test.vapi.mpls_fib_dump()
for e in dump:
if self.local_label == e.label \
and self.eos_bit == e.eos_bit \
and self.table_id == e.table_id:
return True
return False
def __str__(self):
return self.object_id()
def object_id(self):
return ("%d:%s/%d"
% (self.table_id,
self.local_label,
20+self.eos_bit))
+6
View File
@@ -721,6 +721,9 @@ class VppPapiProvider(object):
'outer_fib_id': outer_fib_id}
)
def mpls_fib_dump(self):
return self.api(self.papi.mpls_fib_dump, {})
def mpls_route_add_del(
self,
label,
@@ -1291,3 +1294,6 @@ class VppPapiProvider(object):
def mfib_signal_dump(self):
return self.api(self.papi.mfib_signal_dump, {})
def ip_mfib_dump(self):
return self.api(self.papi.ip_mfib_dump, {})