fib: fix the drop counter for ipv6 RPF failures

Type: fix

The only change to the mfib forwarding node is to set the error code; the rest is checkstyle formatting.

Previously, the packet traces showed a bogus drop reason for RPF failures:

00:04:27:325550: ip6-mfib-forward-rpf
  entry 10 itf -1 flags
00:04:27:325551: ip6-drop
    fib:0 adj:10 flow:0
  UDP: fe80::b203:eaff:fe02:604 -> ff02::1:2
    tos 0x00, flow label 0x651ed, hop limit 1, payload length 64
  UDP: 546 -> 547
    length 64, checksum 0xec9a
00:04:27:325551: error-drop
  rx:GigabitEthernet6/0/0
00:04:27:325553: drop
  ip6-input: drops due to concurrent reassemblies limit

Signed-off-by: Neale Ranns <neale@graphiant.com>
Change-Id: I294684c36edc346b4ebdd83ba66888b3b2197704
This commit is contained in:
Neale Ranns
2021-10-12 07:49:37 +00:00
committed by Damjan Marion
parent 74a25859bd
commit e8f57d593e
3 changed files with 35 additions and 23 deletions

View File

@@ -40,6 +40,7 @@
#ifndef included_ip_ip6_error_h
#define included_ip_ip6_error_h
// clang-format off
#define foreach_ip6_error \
/* Must be first. */ \
_ (NONE, "valid ip6 packets") \
@@ -76,6 +77,9 @@
_ (OUTACL_TABLE_MISS, "output ACL table-miss drops") \
_ (OUTACL_SESSION_DENY, "output ACL session deny drops") \
\
/* Errors from mfib-forward */ \
_ (RPF_FAILURE, "Multicast RPF check failed") \
\
/* Errors signalled by ip6-reassembly */ \
_ (REASS_MISSING_UPPER, "missing-upper layer drops") \
_ (REASS_DUPLICATE_FRAGMENT, "duplicate fragments") \
@@ -87,6 +91,8 @@
_ (REASS_INTERNAL_ERROR, "drops due to internal reassembly error") \
_ (REASS_UNSUPP_IP_PROTO, "unsupported ip protocol")
// clang-format on
typedef enum
{
#define _(sym,str) IP6_ERROR_##sym,

View File

@@ -443,33 +443,33 @@ mfib_forward_rpf (vlib_main_t * vm,
else
{
next0 = MFIB_FORWARD_RPF_NEXT_DROP;
error0 = IP4_ERROR_RPF_FAILURE;
}
error0 =
(is_v4 ? IP4_ERROR_RPF_FAILURE : IP6_ERROR_RPF_FAILURE);
}
b0->error = error0 ? error_node->errors[error0] : 0;
b0->error = error0 ? error_node->errors[error0] : 0;
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
mfib_forward_rpf_trace_t *t0;
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
mfib_forward_rpf_trace_t *t0;
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
t0->entry_index = mfei0;
t0->itf_flags = iflags0;
if (NULL == mfi0)
{
t0->sw_if_index = ~0;
}
else
{
t0->sw_if_index = mfi0->mfi_sw_if_index;
}
}
vlib_validate_buffer_enqueue_x1 (vm, node, next,
to_next, n_left_to_next,
pi0, next0);
}
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
t0->entry_index = mfei0;
t0->itf_flags = iflags0;
if (NULL == mfi0)
{
t0->sw_if_index = ~0;
}
else
{
t0->sw_if_index = mfi0->mfi_sw_if_index;
}
}
vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next,
n_left_to_next, pi0, next0);
}
vlib_put_next_frame(vm, node, next, n_left_to_next);
vlib_put_next_frame (vm, node, next, n_left_to_next);
}
return frame->n_vectors;

View File

@@ -211,6 +211,9 @@ class TestIPMcast(VppTestCase):
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on default route")
count = self.statistics.get_err_counter(
"/err/ip4-input/Multicast RPF check failed")
self.assertEqual(count, len(tx))
#
# A (*,G).
@@ -510,6 +513,9 @@ class TestIPMcast(VppTestCase):
self.vapi.cli("clear trace")
tx = self.create_stream_ip6(self.pg1, "2002::1", "ff01:2::255")
self.send_and_assert_no_replies(self.pg1, tx, "RPF miss")
count = self.statistics.get_err_counter(
"/err/ip6-input/Multicast RPF check failed")
self.assertEqual(count, 2 * len(tx))
#
# a stream that matches the route for (*, ff01::/16)