wireguard: fix fib entry tracking

Type: fix

After peers roaming support addition, FIB entry tracking stopped
working. For example, it can be observed when an adjacency is stacked on
a FIB entry by the plugin and the FIB entry hasn't got ARP resolution
yet. Once the FIB entry gets ARP resolution, the adjacency is not
re-stacked as it used to be. This results in endless ARP requests when
traffic is sent via the adjacency.

This is broken because the plugin stopped using "midchain delegate" with
peers roaming support addition. The reason is that "midchain delegate"
didn't support stacking on a different FIB entry which is needed when
peer's endpoint changes. Now it is supported there (added in 36892).

With this fix, start using "midchain delegate" again and thus, fix FIB
entry tracking. Also, cover this in tests.

Signed-off-by: Alexander Chernavin <achernavin@netgate.com>
Change-Id: Iea91f38739ab129e601fd6567b52565dbd649371
This commit is contained in:
Alexander Chernavin
2022-08-17 08:30:43 +00:00
parent d5e4e25849
commit ae60538925
3 changed files with 118 additions and 53 deletions

View File

@ -16,7 +16,6 @@
#include <vnet/adj/adj_midchain.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_entry_track.h>
#include <wireguard/wireguard_peer.h>
#include <wireguard/wireguard_if.h>
#include <wireguard/wireguard_messages.h>
@ -64,14 +63,13 @@ wg_peer_clear (vlib_main_t * vm, wg_peer_t * peer)
wg_peer_endpoint_reset (&peer->src);
wg_peer_endpoint_reset (&peer->dst);
wg_peer_adj_t *peer_adj;
vec_foreach (peer_adj, peer->adjs)
adj_index_t *adj_index;
vec_foreach (adj_index, peer->adj_indices)
{
wg_peer_by_adj_index[peer_adj->adj_index] = INDEX_INVALID;
if (FIB_NODE_INDEX_INVALID != peer_adj->fib_entry_index)
fib_entry_untrack (peer_adj->fib_entry_index, peer_adj->sibling_index);
if (adj_is_valid (peer_adj->adj_index))
adj_nbr_midchain_unstack (peer_adj->adj_index);
wg_peer_by_adj_index[*adj_index] = INDEX_INVALID;
if (adj_is_valid (*adj_index))
adj_midchain_delegate_unstack (*adj_index);
}
peer->input_thread_index = ~0;
peer->output_thread_index = ~0;
@ -87,7 +85,7 @@ wg_peer_clear (vlib_main_t * vm, wg_peer_t * peer)
peer->timer_need_another_keepalive = false;
vec_free (peer->rewrite);
vec_free (peer->allowed_ips);
vec_free (peer->adjs);
vec_free (peer->adj_indices);
}
static void
@ -99,17 +97,17 @@ wg_peer_init (vlib_main_t * vm, wg_peer_t * peer)
}
static void
wg_peer_adj_stack (wg_peer_t *peer, wg_peer_adj_t *peer_adj)
wg_peer_adj_stack (wg_peer_t *peer, adj_index_t ai)
{
ip_adjacency_t *adj;
u32 sw_if_index;
wg_if_t *wgi;
fib_protocol_t fib_proto;
if (!adj_is_valid (peer_adj->adj_index))
if (!adj_is_valid (ai))
return;
adj = adj_get (peer_adj->adj_index);
adj = adj_get (ai);
sw_if_index = adj->rewrite_header.sw_if_index;
u8 is_ip4 = ip46_address_is_ip4 (&peer->src.addr);
fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
@ -122,7 +120,7 @@ wg_peer_adj_stack (wg_peer_t *peer, wg_peer_adj_t *peer_adj)
if (!vnet_sw_interface_is_admin_up (vnet_get_main (), wgi->sw_if_index) ||
!wg_peer_can_send (peer))
{
adj_nbr_midchain_unstack (peer_adj->adj_index);
adj_midchain_delegate_unstack (ai);
}
else
{
@ -136,16 +134,17 @@ wg_peer_adj_stack (wg_peer_t *peer, wg_peer_adj_t *peer_adj)
u32 fib_index;
fib_index = fib_table_find (fib_proto, peer->table_id);
peer_adj->fib_entry_index =
fib_entry_track (fib_index, &dst, FIB_NODE_TYPE_ADJ,
peer_adj->adj_index, &peer_adj->sibling_index);
adj_nbr_midchain_stack_on_fib_entry (
peer_adj->adj_index, peer_adj->fib_entry_index,
fib_forw_chain_type_from_fib_proto (dst.fp_proto));
adj_midchain_delegate_stack (ai, fib_index, &dst);
}
}
static void
wg_peer_adj_reset_stacking (adj_index_t ai)
{
adj_midchain_delegate_remove (ai);
}
static void
wg_peer_66_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b,
const void *data)
@ -207,11 +206,11 @@ walk_rc_t
wg_peer_if_admin_state_change (index_t peeri, void *data)
{
wg_peer_t *peer;
wg_peer_adj_t *peer_adj;
adj_index_t *adj_index;
peer = wg_peer_get (peeri);
vec_foreach (peer_adj, peer->adjs)
vec_foreach (adj_index, peer->adj_indices)
{
wg_peer_adj_stack (peer, peer_adj);
wg_peer_adj_stack (peer, *adj_index);
}
return (WALK_CONTINUE);
}
@ -224,7 +223,6 @@ wg_peer_if_adj_change (index_t peeri, void *data)
ip_adjacency_t *adj;
wg_peer_t *peer;
fib_prefix_t *allowed_ip;
wg_peer_adj_t *peer_adj;
adj = adj_get (*adj_index);
@ -234,10 +232,7 @@ wg_peer_if_adj_change (index_t peeri, void *data)
if (fib_prefix_is_cover_addr_46 (allowed_ip,
&adj->sub_type.nbr.next_hop))
{
vec_add2 (peer->adjs, peer_adj, 1);
peer_adj->adj_index = *adj_index;
peer_adj->fib_entry_index = FIB_NODE_INDEX_INVALID;
peer_adj->sibling_index = ~0;
vec_add1 (peer->adj_indices, *adj_index);
vec_validate_init_empty (wg_peer_by_adj_index, *adj_index,
INDEX_INVALID);
@ -248,7 +243,7 @@ wg_peer_if_adj_change (index_t peeri, void *data)
ADJ_FLAG_MIDCHAIN_IP_STACK,
vec_dup (peer->rewrite));
wg_peer_adj_stack (peer, peer_adj);
wg_peer_adj_stack (peer, *adj_index);
return (WALK_STOP);
}
}
@ -342,25 +337,19 @@ wg_peer_update_endpoint (index_t peeri, const ip46_address_t *addr, u16 port)
peer->rewrite = wg_build_rewrite (&peer->src.addr, peer->src.port,
&peer->dst.addr, peer->dst.port, is_ip4);
wg_peer_adj_t *peer_adj;
vec_foreach (peer_adj, peer->adjs)
adj_index_t *adj_index;
vec_foreach (adj_index, peer->adj_indices)
{
if (FIB_NODE_INDEX_INVALID != peer_adj->fib_entry_index)
{
fib_entry_untrack (peer_adj->fib_entry_index,
peer_adj->sibling_index);
peer_adj->fib_entry_index = FIB_NODE_INDEX_INVALID;
peer_adj->sibling_index = ~0;
}
if (adj_is_valid (peer_adj->adj_index))
if (adj_is_valid (*adj_index))
{
adj_midchain_fixup_t fixup =
wg_peer_get_fixup (peer, adj_get_link_type (peer_adj->adj_index));
adj_nbr_midchain_update_rewrite (peer_adj->adj_index, fixup, NULL,
wg_peer_get_fixup (peer, adj_get_link_type (*adj_index));
adj_nbr_midchain_update_rewrite (*adj_index, fixup, NULL,
ADJ_FLAG_MIDCHAIN_IP_STACK,
vec_dup (peer->rewrite));
wg_peer_adj_stack (peer, peer_adj);
wg_peer_adj_reset_stacking (*adj_index);
wg_peer_adj_stack (peer, *adj_index);
}
}
}
@ -507,9 +496,9 @@ format_wg_peer (u8 * s, va_list * va)
{
index_t peeri = va_arg (*va, index_t);
fib_prefix_t *allowed_ip;
adj_index_t *adj_index;
u8 key[NOISE_KEY_LEN_BASE64];
wg_peer_t *peer;
wg_peer_adj_t *peer_adj;
peer = wg_peer_get (peeri);
key_to_base64 (peer->remote.r_public, NOISE_PUBLIC_KEY_LEN, key);
@ -522,9 +511,9 @@ format_wg_peer (u8 * s, va_list * va)
peer->wg_sw_if_index, peer->persistent_keepalive_interval, peer->flags,
pool_elts (peer->api_clients));
s = format (s, "\n adj:");
vec_foreach (peer_adj, peer->adjs)
vec_foreach (adj_index, peer->adj_indices)
{
s = format (s, " %d", peer_adj->adj_index);
s = format (s, " %d", *adj_index);
}
s = format (s, "\n key:%=s %U", key, format_hex_bytes,
peer->remote.r_public, NOISE_PUBLIC_KEY_LEN);

View File

@ -68,13 +68,6 @@ typedef enum
WG_PEER_ESTABLISHED = 0x2,
} wg_peer_flags;
typedef struct wg_peer_adj_t_
{
adj_index_t adj_index;
fib_node_index_t fib_entry_index;
u32 sibling_index;
} wg_peer_adj_t;
typedef struct wg_peer
{
noise_remote_t remote;
@ -87,7 +80,7 @@ typedef struct wg_peer
wg_peer_endpoint_t dst;
wg_peer_endpoint_t src;
u32 table_id;
wg_peer_adj_t *adjs;
adj_index_t *adj_indices;
/* rewrite built from address information */
u8 *rewrite;

View File

@ -2328,3 +2328,86 @@ class WireguardHandoffTests(TestWg):
@unittest.skip("test disabled")
def test_wg_multi_interface(self):
"""Multi-tunnel on the same port"""
class TestWgFIB(VppTestCase):
"""Wireguard FIB Test Case"""
@classmethod
def setUpClass(cls):
super(TestWgFIB, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestWgFIB, cls).tearDownClass()
def setUp(self):
super(TestWgFIB, self).setUp()
self.create_pg_interfaces(range(2))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
super(TestWgFIB, self).tearDown()
def test_wg_fib_tracking(self):
"""FIB tracking"""
port = 12323
# create wg interface
wg0 = VppWgInterface(self, self.pg1.local_ip4, port).add_vpp_config()
wg0.admin_up()
wg0.config_ip4()
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# create a peer
peer_1 = VppWgPeer(
self, wg0, self.pg1.remote_ip4, port + 1, ["10.11.3.0/24"]
).add_vpp_config()
self.assertEqual(len(self.vapi.wireguard_peers_dump()), 1)
# create a route to rewrite traffic into the wg interface
r1 = VppIpRoute(
self, "10.11.3.0", 24, [VppRoutePath("10.11.3.1", wg0.sw_if_index)]
).add_vpp_config()
# resolve ARP and expect the adjacency to update
self.pg1.resolve_arp()
# wait for the peer to send a handshake initiation
rxs = self.pg1.get_capture(2, timeout=6)
# prepare and send a handshake response
# expect a keepalive message
resp = peer_1.consume_init(rxs[1], self.pg1)
rxs = self.send_and_expect(self.pg1, [resp], self.pg1)
# verify the keepalive message
b = peer_1.decrypt_transport(rxs[0])
self.assertEqual(0, len(b))
# prepare and send a packet that will be rewritten into the wg interface
# expect a data packet sent to the new endpoint
p = (
Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
/ IP(src=self.pg0.remote_ip4, dst="10.11.3.2")
/ UDP(sport=555, dport=556)
/ Raw()
)
rxs = self.send_and_expect(self.pg0, [p], self.pg1)
# verify the data packet
peer_1.validate_encapped(rxs, p)
# remove configs
r1.remove_vpp_config()
peer_1.remove_vpp_config()
wg0.remove_vpp_config()