memif: Process bad descriptors correctly in memif_process_desc

When there is a bad descriptor, it may be at the beginning, in the middle,
or at the end of the batch if the batch has more than 3 descriptors.
When a bad descriptor is encountered in the batch, we need to roll back
n_buffers in memif_process_desc(); otherwise the statement in the same
function
	    memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy,
			       dst_off, n_buffers - 1);
picks up the wrong buffer_vec_index, namely that of the bad descriptor,
while parsing the good descriptor immediately following it, because
n_buffers was already incremented at the beginning of the
while (n_left) loop.
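
In sketch form, the fixed loop shape looks like this (simplified from
memif_process_desc() in the diff below; desc_is_bad() is a hypothetical
stand-in for the real desc_status[i].err test):

	while (n_left)
	  {
	    n_buffers++;		/* tentatively claim a buffer slot */
	    if (desc_is_bad (i))	/* hypothetical stand-in for desc_status[i].err */
	      {
		bad_packets++;
		n_buffers--;		/* roll back: the next good descriptor
					   reuses this slot as n_buffers - 1 */
		goto next_packet;
	      }
	    /* good descriptor: copy ops reference buffer_vec_index n_buffers - 1 */
	  next_packet:
	    n_left--;
	  }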

The other problem is that we should count the number of bad packets and
reduce ptd->n_packets, so that subsequent processing in device_input sees
the correct number of packets.
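
Concretely, the adjustment at the end of memif_process_desc() (taken from
the diff below) is:

	ASSERT (ptd->n_packets >= bad_packets);
	ptd->n_packets -= bad_packets;
	return n_buffers;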

The last fix is to check whether n_buffers == 0 in device_input and, if so,
skip the descriptor copy altogether. This case can happen when all the
descriptors in the batch are bad.
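
Roughly, the guard added to memif_device_input_inline() looks like this
(see the diff below); note the ring slots still have to be released before
bailing out:

	if (PREDICT_FALSE (n_buffers == 0))
	  {
	    /* All descriptors are bad. Release slots in the ring and bail */
	    memif_advance_ring (type, mq, ring, cur_slot);
	    goto refill;
	  }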

Type: fix

Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: I28ed1d87236b045657006755747b5750a9a733be
@@ -248,6 +248,7 @@ memif_process_desc (vlib_main_t *vm, vlib_node_runtime_t *node,
   u32 n_left = ptd->n_packets;
   u32 packet_len;
   int i = -1;
+  int bad_packets = 0;
 
   /* construct copy and packet vector out of ring slots */
   while (n_left)
@@ -268,7 +269,14 @@ memif_process_desc (vlib_main_t *vm, vlib_node_runtime_t *node,
       mb0 = desc_data[i];
 
       if (PREDICT_FALSE (desc_status[i].err))
-	vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC, 1);
+	{
+	  vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC,
+			    1);
+	  bad_packets++;
+	  ASSERT (n_buffers > 0);
+	  n_buffers--;
+	  goto next_packet;
+	}
       else
 	do
 	  {
@@ -298,9 +306,12 @@ memif_process_desc (vlib_main_t *vm, vlib_node_runtime_t *node,
       po->packet_len = packet_len;
       po++;
+    next_packet:
       /* next packet */
       n_left--;
     }
+  ASSERT (ptd->n_packets >= bad_packets);
+  ptd->n_packets -= bad_packets;
   return n_buffers;
 }
 
 static_always_inline void
@@ -462,6 +473,21 @@ memif_fill_buffer_mdata (vlib_main_t *vm, vlib_node_runtime_t *node,
     }
 }
 
+static_always_inline void
+memif_advance_ring (memif_ring_type_t type, memif_queue_t *mq,
+		    memif_ring_t *ring, u16 cur_slot)
+{
+  if (type == MEMIF_RING_S2M)
+    {
+      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
+      mq->last_head = cur_slot;
+    }
+  else
+    {
+      mq->last_tail = cur_slot;
+    }
+}
+
 static_always_inline uword
 memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 			   memif_if_t *mif, memif_ring_type_t type, u16 qid,
@@ -533,6 +559,13 @@ memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
   else
     n_buffers = memif_process_desc (vm, node, ptd, mif);
 
+  if (PREDICT_FALSE (n_buffers == 0))
+    {
+      /* All descriptors are bad. Release slots in the ring and bail */
+      memif_advance_ring (type, mq, ring, cur_slot);
+      goto refill;
+    }
+
   /* allocate free buffers */
   vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
   n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
@@ -588,15 +621,7 @@ memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
     }
 
   /* release slots from the ring */
-  if (type == MEMIF_RING_S2M)
-    {
-      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
-      mq->last_head = cur_slot;
-    }
-  else
-    {
-      mq->last_tail = cur_slot;
-    }
+  memif_advance_ring (type, mq, ring, cur_slot);
 
   /* prepare buffer template and next indices */
   vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] = mif->sw_if_index;