IP Mcast - recalculate on interface up/down

Change-Id: Ie5b88fd7187ed62218a2e4e0e493c33e3e9ecc2f
Signed-off-by: Neale Ranns <nranns@cisco.com>
Author:       Neale Ranns
Date:         2017-05-30 09:53:52 -07:00
Committed by: Damjan Marion
Parent:       10d8cc6bf9
Commit:       c2aad53aa5

4 changed files with 115 additions and 78 deletions


@@ -230,9 +230,9 @@ VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(adj_mcast_interface_state_change);
* HW interface state changes
*/
static void
adj_nbr_hw_sw_interface_state_change (vnet_main_t * vnm,
u32 sw_if_index,
void *arg)
adj_mcast_hw_sw_interface_state_change (vnet_main_t * vnm,
u32 sw_if_index,
void *arg)
{
adj_mcast_interface_state_change(vnm, sw_if_index, (uword) arg);
}
@@ -255,7 +255,7 @@ adj_mcast_hw_interface_state_change (vnet_main_t * vnm,
0);
vnet_hw_interface_walk_sw(vnm, hw_if_index,
adj_nbr_hw_sw_interface_state_change,
adj_mcast_hw_sw_interface_state_change,
(void*) sw_flags);
return (NULL);
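
These two hunks rename the per-SW-interface callback from adj_nbr_hw_sw_interface_state_change to adj_mcast_hw_sw_interface_state_change so its name matches the mcast adjacency family it serves; the behaviour is unchanged. The surrounding pattern is that the HW-level handler fans a hardware state change out to each SW interface on that hardware, and the per-SW callback applies the new flags. Below is a minimal standalone sketch of that fan-out pattern; every type and function name in it is invented for illustration and is not the VPP API.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for illustration only; these are not the VPP types. */
typedef void (*sw_walk_fn_t) (uint32_t sw_if_index, void *ctx);

/* Each HW interface owns a small set of SW (sub-)interfaces. */
typedef struct
{
  uint32_t sw_if_indices[4];
  uint32_t n_sw;
} hw_if_t;

/* Walk every SW interface under a HW interface, calling fn for each one. */
static void
hw_interface_walk_sw (const hw_if_t * hw, sw_walk_fn_t fn, void *ctx)
{
  uint32_t i;

  for (i = 0; i < hw->n_sw; i++)
    fn (hw->sw_if_indices[i], ctx);
}

/* Per-SW-interface action: in the real code this is where the mcast
 * adjacencies on that SW interface pick up the new admin/link flags. */
static void
adj_mcast_sw_state_change (uint32_t sw_if_index, void *ctx)
{
  uint32_t flags = (uint32_t) (uintptr_t) ctx;

  printf ("mcast adj update: sw_if_index %u flags 0x%x\n",
          (unsigned) sw_if_index, (unsigned) flags);
}

/* HW-level handler: fan the HW event out as one callback per SW interface. */
static void
adj_mcast_hw_state_change (const hw_if_t * hw, uint32_t flags)
{
  hw_interface_walk_sw (hw, adj_mcast_sw_state_change,
                        (void *) (uintptr_t) flags);
}

int
main (void)
{
  hw_if_t hw = {.sw_if_indices = {1, 2, 3},.n_sw = 3 };

  adj_mcast_hw_state_change (&hw, 0x1 /* e.g. link went up */ );
  return 0;
}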


@@ -366,62 +366,6 @@ mfib_entry_src_remove (mfib_entry_t *mfib_entry,
}
}
static void
mfib_entry_last_lock_gone (fib_node_t *node)
{
mfib_entry_t *mfib_entry;
mfib_entry_src_t *msrc;
mfib_entry = mfib_entry_from_fib_node(node);
dpo_reset(&mfib_entry->mfe_rep);
MFIB_ENTRY_DBG(mfib_entry, "last-lock");
vec_foreach(msrc, mfib_entry->mfe_srcs)
{
mfib_entry_src_flush(msrc);
}
vec_free(mfib_entry->mfe_srcs);
fib_node_deinit(&mfib_entry->mfe_node);
pool_put(mfib_entry_pool, mfib_entry);
}
/*
* mfib_entry_back_walk_notify
*
* A back walk has reached this entry.
*/
static fib_node_back_walk_rc_t
mfib_entry_back_walk_notify (fib_node_t *node,
fib_node_back_walk_ctx_t *ctx)
{
// FIXME - re-evaluate
return (FIB_NODE_BACK_WALK_CONTINUE);
}
static void
mfib_entry_show_memory (void)
{
fib_show_memory_usage("multicast-Entry",
pool_elts(mfib_entry_pool),
pool_len(mfib_entry_pool),
sizeof(mfib_entry_t));
}
/*
* The MFIB entry's graph node virtual function table
*/
static const fib_node_vft_t mfib_entry_vft = {
.fnv_get = mfib_entry_get_node,
.fnv_last_lock = mfib_entry_last_lock_gone,
.fnv_back_walk = mfib_entry_back_walk_notify,
.fnv_mem_show = mfib_entry_show_memory,
};
u32
mfib_entry_child_add (fib_node_index_t mfib_entry_index,
fib_node_type_t child_type,
@@ -464,6 +408,7 @@ mfib_entry_alloc (u32 fib_index,
mfib_entry->mfe_srcs = NULL;
mfib_entry->mfe_itfs = NULL;
mfib_entry->mfe_rpf_id = MFIB_RPF_ID_NONE;
mfib_entry->mfe_pl = FIB_NODE_INDEX_INVALID;
dpo_reset(&mfib_entry->mfe_rep);
@@ -594,6 +539,15 @@ mfib_entry_stack (mfib_entry_t *mfib_entry,
dp = fib_proto_to_dpo(mfib_entry_get_proto(mfib_entry));
/*
* unlink the entry from the previous path-list.
*/
if (FIB_NODE_INDEX_INVALID != mfib_entry->mfe_pl)
{
fib_path_list_child_remove(mfib_entry->mfe_pl,
mfib_entry->mfe_sibling);
}
if (NULL != msrc &&
FIB_NODE_INDEX_INVALID != msrc->mfes_pl)
{
@@ -658,6 +612,17 @@ mfib_entry_stack (mfib_entry_t *mfib_entry,
dpo_reset(&ctx.next_hops[0].path_dpo);
vec_free(ctx.next_hops);
}
/*
* link the entry to the path-list.
* The entry needs to be a child so that we receive the back-walk
* updates to recalculate forwarding.
*/
mfib_entry->mfe_pl = msrc->mfes_pl;
mfib_entry->mfe_sibling =
fib_path_list_child_add(mfib_entry->mfe_pl,
FIB_NODE_TYPE_MFIB_ENTRY,
mfib_entry_get_index(mfib_entry));
}
else
{
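
The hunks above make the mfib entry register itself as a child of the path-list it stacks on, recording the path-list index in mfe_pl and the slot returned by fib_path_list_child_add in mfe_sibling, and unlinking from any previous path-list before relinking. This is what lets back-walks reach the entry later. Below is a minimal standalone sketch of that unlink/relink bookkeeping; the types, fixed-size child array, and function names are invented for illustration, not the real fib_path_list implementation.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INDEX_INVALID ((uint32_t) ~0)
#define MAX_CHILDREN  8

/* Stand-in for a path-list: it records which entries depend on it so it
 * can back-walk them when its forwarding changes. */
typedef struct
{
  uint32_t children[MAX_CHILDREN];  /* entry indices, INDEX_INVALID = free */
} path_list_t;

/* Stand-in for an mfib entry: it remembers the path-list it is linked to
 * and its slot ("sibling") in that path-list's child list. */
typedef struct
{
  uint32_t index;
  uint32_t pl;       /* linked path-list, INDEX_INVALID if none */
  uint32_t sibling;  /* slot in that path-list's child list */
} entry_t;

static uint32_t
path_list_child_add (path_list_t * pl, uint32_t child_index)
{
  uint32_t i;

  for (i = 0; i < MAX_CHILDREN; i++)
    if (INDEX_INVALID == pl->children[i])
      {
        pl->children[i] = child_index;
        return i;  /* the sibling index the child stores */
      }
  assert (0 && "no free child slot");
  return INDEX_INVALID;
}

static void
path_list_child_remove (path_list_t * pl, uint32_t sibling)
{
  pl->children[sibling] = INDEX_INVALID;
}

/* Restack an entry on a (possibly different) path-list: unlink from the
 * old one first, then register as a child of the new one. */
static void
entry_stack_on (entry_t * e, path_list_t * pls, uint32_t new_pl)
{
  if (INDEX_INVALID != e->pl)
    path_list_child_remove (&pls[e->pl], e->sibling);

  e->pl = new_pl;
  e->sibling = path_list_child_add (&pls[new_pl], e->index);
}

int
main (void)
{
  path_list_t pls[2];
  entry_t e = {.index = 7,.pl = INDEX_INVALID,.sibling = INDEX_INVALID };
  int p, i;

  for (p = 0; p < 2; p++)
    for (i = 0; i < MAX_CHILDREN; i++)
      pls[p].children[i] = INDEX_INVALID;

  entry_stack_on (&e, pls, 0);  /* first stack: nothing to unlink */
  entry_stack_on (&e, pls, 1);  /* restack: unlinks from pls[0] first */

  printf ("entry %u is child %u of path-list %u\n",
          (unsigned) e.index, (unsigned) e.sibling, (unsigned) e.pl);
  return 0;
}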
@@ -1098,6 +1063,62 @@ mfib_entry_cmp_for_sort (void *i1, void *i2)
*mfib_entry_index2));
}
static void
mfib_entry_last_lock_gone (fib_node_t *node)
{
mfib_entry_t *mfib_entry;
mfib_entry_src_t *msrc;
mfib_entry = mfib_entry_from_fib_node(node);
dpo_reset(&mfib_entry->mfe_rep);
MFIB_ENTRY_DBG(mfib_entry, "last-lock");
vec_foreach(msrc, mfib_entry->mfe_srcs)
{
mfib_entry_src_flush(msrc);
}
vec_free(mfib_entry->mfe_srcs);
fib_node_deinit(&mfib_entry->mfe_node);
pool_put(mfib_entry_pool, mfib_entry);
}
/*
* mfib_entry_back_walk_notify
*
* A back walk has reached this entry.
*/
static fib_node_back_walk_rc_t
mfib_entry_back_walk_notify (fib_node_t *node,
fib_node_back_walk_ctx_t *ctx)
{
mfib_entry_recalculate_forwarding(mfib_entry_from_fib_node(node));
return (FIB_NODE_BACK_WALK_CONTINUE);
}
static void
mfib_entry_show_memory (void)
{
fib_show_memory_usage("multicast-Entry",
pool_elts(mfib_entry_pool),
pool_len(mfib_entry_pool),
sizeof(mfib_entry_t));
}
/*
* The MFIB entry's graph node virtual function table
*/
static const fib_node_vft_t mfib_entry_vft = {
.fnv_get = mfib_entry_get_node,
.fnv_last_lock = mfib_entry_last_lock_gone,
.fnv_back_walk = mfib_entry_back_walk_notify,
.fnv_mem_show = mfib_entry_show_memory,
};
void
mfib_entry_lock (fib_node_index_t mfib_entry_index)
{
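
With the entry now a child of its path-list, a back-walk from the path-list (for example, when an interface goes down or comes back up) lands in mfib_entry_back_walk_notify, which recalculates the entry's forwarding instead of returning without doing anything as the old FIXME stub did. Below is a minimal standalone sketch of that notify-and-recalculate flow; the types and names are invented stand-ins for the real fib_node machinery.

#include <stdint.h>
#include <stdio.h>

/* Result a child hands back to the walk, mirroring the CONTINUE idea. */
typedef enum
{
  BACK_WALK_CONTINUE
} back_walk_rc_t;

/* Stand-in for an mfib entry; invented for illustration. */
typedef struct
{
  uint32_t index;
  int forwarding_generation;  /* bumped each time forwarding is rebuilt */
} entry_t;

/* What the real fnv_back_walk now leads to: rebuild the entry's
 * replication/forwarding state from its sources' path-lists. */
static void
entry_recalculate_forwarding (entry_t * e)
{
  e->forwarding_generation++;
  printf ("entry %u: forwarding recalculated (gen %d)\n",
          (unsigned) e->index, e->forwarding_generation);
}

static back_walk_rc_t
entry_back_walk_notify (entry_t * e)
{
  entry_recalculate_forwarding (e);
  return BACK_WALK_CONTINUE;
}

/* The parent (e.g. a path-list reacting to an interface going up or down)
 * notifies every registered child. */
static void
parent_back_walk (entry_t ** children, uint32_t n_children)
{
  uint32_t i;

  for (i = 0; i < n_children; i++)
    (void) entry_back_walk_notify (children[i]);
}

int
main (void)
{
  entry_t a = {.index = 1 }, b = {.index = 2 };
  entry_t *children[] = { &a, &b };

  parent_back_walk (children, 2);  /* e.g. interface went down */
  parent_back_walk (children, 2);  /* e.g. interface came back up */
  return 0;
}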


@@ -48,6 +48,16 @@ typedef struct mfib_entry_t_ {
*/
struct mfib_entry_src_t_ *mfe_srcs;
/**
* The path-list of which this entry is a child
*/
fib_node_index_t mfe_pl;
/**
* The sibling index on the path-list
*/
u32 mfe_sibling;
/**
* 2nd cache line has the members used in the data plane
*/


@@ -89,8 +89,8 @@ class TestIPMcast(VppTestCase):
capture.remove(p)
return capture
def verify_capture_ip4(self, src_if, sent):
rxd = self.pg1.get_capture(N_PKTS_IN_STREAM)
def verify_capture_ip4(self, rx_if, sent):
rxd = rx_if.get_capture(len(sent))
try:
capture = self.verify_filter(rxd, sent)
@@ -118,8 +118,8 @@ class TestIPMcast(VppTestCase):
except:
raise
def verify_capture_ip6(self, src_if, sent):
capture = self.pg1.get_capture(N_PKTS_IN_STREAM)
def verify_capture_ip6(self, rx_if, sent):
capture = rx_if.get_capture(len(sent))
self.assertEqual(len(capture), len(sent))
@@ -232,11 +232,6 @@ class TestIPMcast(VppTestCase):
# We expect replications on Pg1->7
self.verify_capture_ip4(self.pg1, tx)
self.verify_capture_ip4(self.pg2, tx)
self.verify_capture_ip4(self.pg3, tx)
self.verify_capture_ip4(self.pg4, tx)
self.verify_capture_ip4(self.pg5, tx)
self.verify_capture_ip4(self.pg6, tx)
self.verify_capture_ip4(self.pg7, tx)
# no replications on Pg0
self.pg0.assert_nothing_captured(
@@ -259,11 +254,6 @@ class TestIPMcast(VppTestCase):
# We expect replications on Pg1->7
self.verify_capture_ip4(self.pg1, tx)
self.verify_capture_ip4(self.pg2, tx)
self.verify_capture_ip4(self.pg3, tx)
self.verify_capture_ip4(self.pg4, tx)
self.verify_capture_ip4(self.pg5, tx)
self.verify_capture_ip4(self.pg6, tx)
self.verify_capture_ip4(self.pg7, tx)
# no replications on Pg0
self.pg0.assert_nothing_captured(
@@ -308,10 +298,10 @@ class TestIPMcast(VppTestCase):
self.verify_capture_ip4(self.pg1, tx)
self.verify_capture_ip4(self.pg2, tx)
self.verify_capture_ip4(self.pg3, tx)
# no replications on Pg0
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on PG0")
self.verify_capture_ip4(self.pg4, tx)
self.verify_capture_ip4(self.pg5, tx)
self.verify_capture_ip4(self.pg6, tx)
self.verify_capture_ip4(self.pg7, tx)
route_232_1_1_1.remove_vpp_config()
route_1_1_1_1_232_1_1_1.remove_vpp_config()
@@ -408,6 +398,22 @@ class TestIPMcast(VppTestCase):
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
#
# Bounce the interface and it should still work
#
self.pg1.admin_down()
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.assert_nothing_captured(
remark="IP multicast packets forwarded on down PG1")
self.pg1.admin_up()
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.verify_capture_ip6(self.pg1, tx)
#
# a stream that matches the route for (*,ff01::1)
#