ip-neighbor: Add flush API

Type: feature

Signed-off-by: Neale Ranns <nranns@cisco.com>
Change-Id: I96509c274895b917c3e220204d7959d9270de30d
Author: Neale Ranns
Date: 2020-04-23 09:04:59 +00:00
Committed by: Ole Trøan
Parent: f3783e1501
Commit: 240dcb24a0
5 changed files with 227 additions and 6 deletions


@@ -163,6 +163,22 @@ autoreply define ip_neighbor_replace_end
  u32 context;
};

/** \brief IP neighbor flush request - removes *all* neighbours:
    dynamic and static entries added via the API/CLI, and dynamic
    entries learned from the data-plane.
    @param client_index - opaque cookie to identify the sender
    @param context - sender context, to match reply w/ request
    @param af - Flush neighbours of this address family
    @param sw_if_index - Flush on this interface (~0 => all interfaces)
*/
autoreply define ip_neighbor_flush
{
  u32 client_index;
  u32 context;
  vl_api_address_family_t af;
  vl_api_interface_index_t sw_if_index [default=0xffffffff];
};
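
For illustration, this is roughly how a client could drive the new message through vpp_papi; a minimal sketch, assuming a running VPP instance whose .api.json files are discoverable, with the client name and the sw_if_index value 1 as placeholders:

```python
# Sketch: exercise ip_neighbor_flush from a vpp_papi client.
from vpp_papi import VPPApiClient, VppEnum

vpp = VPPApiClient()          # assumes default .api.json discovery
vpp.connect("flush-example")  # client name is arbitrary

af = VppEnum.vl_api_address_family_t

# flush IPv4 neighbours on one interface (sw_if_index=1 is a placeholder)
rv = vpp.api.ip_neighbor_flush(af=af.ADDRESS_IP4, sw_if_index=1)
print("per-interface flush retval:", rv.retval)

# sw_if_index defaults to 0xffffffff (~0), i.e. flush on all interfaces
rv = vpp.api.ip_neighbor_flush(af=af.ADDRESS_IP6)
print("all-interfaces flush retval:", rv.retval)

vpp.disconnect()
```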
/** \brief Register for IP4 ARP resolution event on receiving ARP reply or
           MAC/IP info from ARP requests in L2 BDs
    @param client_index - opaque cookie to identify the sender


@@ -576,6 +576,40 @@ ip_neighbor_del (const ip46_address_t * ip, ip46_type_t type, u32 sw_if_index)
  return (0);
}

typedef struct ip_neighbor_del_all_ctx_t_
{
  index_t *ipn_del;
} ip_neighbor_del_all_ctx_t;

static walk_rc_t
ip_neighbor_del_all_walk_cb (index_t ipni, void *arg)
{
  ip_neighbor_del_all_ctx_t *ctx = arg;

  vec_add1 (ctx->ipn_del, ipni);

  return (WALK_CONTINUE);
}

void
ip_neighbor_del_all (ip46_type_t type, u32 sw_if_index)
{
  IP_NEIGHBOR_INFO ("delete-all: %U, %U",
                    format_ip46_type, type,
                    format_vnet_sw_if_index_name, vnet_get_main (),
                    sw_if_index);

  ip_neighbor_del_all_ctx_t ctx = {
    .ipn_del = NULL,
  };
  index_t *ipni;

  ip_neighbor_walk (type, sw_if_index, ip_neighbor_del_all_walk_cb, &ctx);

  vec_foreach (ipni, ctx.ipn_del)
    ip_neighbor_free (ip_neighbor_get (*ipni));

  vec_free (ctx.ipn_del);
}
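
ip_neighbor_del_all deliberately works in two passes: the walk callback only collects entry indices, and the frees happen afterwards, so the neighbour pool is never modified while it is being walked. A rough Python analogy of that collect-then-delete pattern (all names here are illustrative, not part of the change):

```python
# Collect-then-delete, mirroring the two-pass shape of ip_neighbor_del_all:
# mutating a container while iterating it is unsafe (a Python dict raises
# RuntimeError), so pass 1 gathers the keys and pass 2 deletes them.
neighbours = {
    ("pg0", "10.0.0.1"): "02:fe:00:00:00:01",
    ("pg0", "10.0.0.2"): "02:fe:00:00:00:02",
    ("pg1", "10.0.1.1"): "02:fe:00:00:00:03",
}

def flush(table, interface):
    to_del = [key for key in table if key[0] == interface]  # pass 1: walk
    for key in to_del:                                      # pass 2: free
        del table[key]

flush(neighbours, "pg0")
assert list(neighbours) == [("pg1", "10.0.1.1")]
```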
void
ip_neighbor_update (vnet_main_t * vnm, adj_index_t ai)
{


@@ -41,6 +41,8 @@ extern int ip_neighbor_del (const ip46_address_t * ip,
extern int ip_neighbor_config (ip46_type_t type, u32 limit, u32 age,
                               bool recycle);

extern void ip_neighbor_del_all (ip46_type_t type, u32 sw_if_index);

typedef walk_rc_t (*ip_neighbor_walk_cb_t) (index_t ipni, void *ctx);
extern void ip_neighbor_walk (ip46_type_t type,
                              u32 sw_if_index,

@@ -301,6 +301,25 @@ vl_api_ip_neighbor_replace_end_t_handler (vl_api_ip_neighbor_replace_end_t *
  REPLY_MACRO (VL_API_IP_NEIGHBOR_REPLACE_END_REPLY);
}

static void
vl_api_ip_neighbor_flush_t_handler (vl_api_ip_neighbor_flush_t * mp)
{
  vl_api_ip_neighbor_flush_reply_t *rmp;
  ip_address_family_t af;
  int rv;

  if (mp->sw_if_index != ~0)
    VALIDATE_SW_IF_INDEX (mp);

  rv = ip_address_family_decode (mp->af, &af);

  if (!rv)
    ip_neighbor_del_all (ip46_type_from_af (af), ntohl (mp->sw_if_index));

  BAD_SW_IF_INDEX_LABEL;
  REPLY_MACRO (VL_API_IP_NEIGHBOR_FLUSH_REPLY);
}
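
Note that the handler only validates sw_if_index when it is not ~0, so an all-interfaces flush never fails interface validation, while a flush on an unknown interface returns a bad-index error. A sketch of the two outcomes as a client might see them (reusing the hypothetical vpp client from the earlier sketch; the index 9999 is a placeholder for a non-existent interface):

```python
af = VppEnum.vl_api_address_family_t

# unknown interface: expect a non-zero retval (invalid sw_if_index)
rv = vpp.api.ip_neighbor_flush(af=af.ADDRESS_IP4, sw_if_index=9999)
assert rv.retval != 0

# ~0 skips validation and flushes every interface
rv = vpp.api.ip_neighbor_flush(af=af.ADDRESS_IP4, sw_if_index=0xffffffff)
assert rv.retval == 0
```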
#define vl_msg_name_crc_list
#include <vnet/ip-neighbor/ip_neighbor.api.h>
#undef vl_msg_name_crc_list


@@ -13,7 +13,7 @@ from vpp_papi import VppEnum
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP, Dot1Q
-from scapy.layers.inet import IP, UDP
+from scapy.layers.inet import IP, UDP, TCP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS
from scapy.layers.inet6 import IPv6
@@ -777,11 +777,7 @@ class ARPTestCase(VppTestCase):
        #
        # Send the ARP request with an originating address that
        # is VPP's own address
        #
-        self.pg2.add_stream(arp_req_from_me)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
-        rx = self.pg2.get_capture(1)
+        rx = self.send_and_expect(self.pg2, [arp_req_from_me], self.pg2)
        self.verify_arp_resp(rx[0],
                             self.pg2.local_mac,
                             self.pg2.remote_mac,
@@ -795,6 +791,48 @@
                                 self.pg2.sw_if_index,
                                 self.pg0.local_ip4))
        #
        # setup a punt redirect so packets from the uplink go to the tap
        #
        self.vapi.ip_punt_redirect(self.pg0.sw_if_index,
                                   self.pg2.sw_if_index,
                                   self.pg0.local_ip4)

        p_tcp = (Ether(src=self.pg0.remote_mac,
                       dst=self.pg0.local_mac,) /
                 IP(src=self.pg0.remote_ip4,
                    dst=self.pg0.local_ip4) /
                 TCP(sport=80, dport=80) /
                 Raw())
        rx = self.send_and_expect(self.pg0, [p_tcp], self.pg2)

        # there's no ARP entry, so what is sent is an ARP request
        self.assertTrue(rx[0].haslayer(ARP))

        # add an ARP entry for VPP's pg0 address on the host interface
        n1 = VppNeighbor(self,
                         self.pg2.sw_if_index,
                         self.pg2.remote_mac,
                         self.pg0.local_ip4,
                         is_no_fib_entry=True).add_vpp_config()

        # now the packets should forward
        rx = self.send_and_expect(self.pg0, [p_tcp], self.pg2)
        self.assertFalse(rx[0].haslayer(ARP))
        self.assertEqual(rx[0][Ether].dst, self.pg2.remote_mac)

        #
        # flush the neighbor cache on the uplink
        #
        af = VppEnum.vl_api_address_family_t
        self.vapi.ip_neighbor_flush(af.ADDRESS_IP4, self.pg0.sw_if_index)

        # ensure we can still resolve the ARPs on the uplink
        self.pg0.resolve_arp()

        self.assertTrue(find_nbr(self,
                                 self.pg0.sw_if_index,
                                 self.pg0.remote_ip4))

        #
        # cleanup
        #
@@ -1966,5 +2004,117 @@ class NeighborReplaceTestCase(VppTestCase):
                                     i.remote_hosts[h].ip6))

class NeighborFlush(VppTestCase):
    """ Neighbor Flush """

    @classmethod
    def setUpClass(cls):
        super(NeighborFlush, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(NeighborFlush, cls).tearDownClass()

    def setUp(self):
        super(NeighborFlush, self).setUp()

        self.create_pg_interfaces(range(2))

        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.resolve_arp()
            i.resolve_ndp()

    def tearDown(self):
        super(NeighborFlush, self).tearDown()

        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.admin_down()

    def test_flush(self):
        """ Neighbour Flush """

        e = VppEnum
        nf = e.vl_api_ip_neighbor_flags_t
        af = e.vl_api_address_family_t

        N_HOSTS = 16
        static = [False, True]
        self.pg0.generate_remote_hosts(N_HOSTS)
        self.pg1.generate_remote_hosts(N_HOSTS)

        for s in static:
            # a few v4 and v6 neighbours, dynamic on the first pass,
            # static on the second
            for n in range(N_HOSTS):
                VppNeighbor(self,
                            self.pg0.sw_if_index,
                            self.pg0.remote_hosts[n].mac,
                            self.pg0.remote_hosts[n].ip4,
                            is_static=s).add_vpp_config()
                VppNeighbor(self,
                            self.pg1.sw_if_index,
                            self.pg1.remote_hosts[n].mac,
                            self.pg1.remote_hosts[n].ip6,
                            is_static=s).add_vpp_config()

            # flush the interfaces individually
            self.vapi.ip_neighbor_flush(af.ADDRESS_IP4, self.pg0.sw_if_index)

            # check we haven't flushed that which we shouldn't
            for n in range(N_HOSTS):
                self.assertTrue(find_nbr(self,
                                         self.pg1.sw_if_index,
                                         self.pg1.remote_hosts[n].ip6,
                                         is_static=s))

            self.vapi.ip_neighbor_flush(af.ADDRESS_IP6, self.pg1.sw_if_index)

            for n in range(N_HOSTS):
                self.assertFalse(find_nbr(self,
                                          self.pg0.sw_if_index,
                                          self.pg0.remote_hosts[n].ip4))
                self.assertFalse(find_nbr(self,
                                          self.pg1.sw_if_index,
                                          self.pg1.remote_hosts[n].ip6))

            # add the neighbours back
            for n in range(N_HOSTS):
                VppNeighbor(self,
                            self.pg0.sw_if_index,
                            self.pg0.remote_hosts[n].mac,
                            self.pg0.remote_hosts[n].ip4,
                            is_static=s).add_vpp_config()
                VppNeighbor(self,
                            self.pg1.sw_if_index,
                            self.pg1.remote_hosts[n].mac,
                            self.pg1.remote_hosts[n].ip6,
                            is_static=s).add_vpp_config()

            self.logger.info(self.vapi.cli("sh ip neighbor"))

            # flush all interfaces at the same time (sw_if_index=~0)
            self.vapi.ip_neighbor_flush(af.ADDRESS_IP6, 0xffffffff)

            # check we haven't flushed that which we shouldn't
            for n in range(N_HOSTS):
                self.assertTrue(find_nbr(self,
                                         self.pg0.sw_if_index,
                                         self.pg0.remote_hosts[n].ip4,
                                         is_static=s))

            self.vapi.ip_neighbor_flush(af.ADDRESS_IP4, 0xffffffff)

            for n in range(N_HOSTS):
                self.assertFalse(find_nbr(self,
                                          self.pg0.sw_if_index,
                                          self.pg0.remote_hosts[n].ip4))
                self.assertFalse(find_nbr(self,
                                          self.pg1.sw_if_index,
                                          self.pg1.remote_hosts[n].ip6))


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)