memif: add num pkts received/sent per queue

Add per-queue memif stats for performance tuning: packets sent and
received per ring, the number of tx packets that found no free ring
slot, and the largest such shortfall seen in a single call.

Type: improvement
Change-Id: Ifacc80c0adfe92075d91179857c8956d1cbf3a70
Signed-off-by: Dau Do <daudo@yahoo.com>
Author:    Dau Do <daudo@yahoo.com>, 2024-12-01 01:47:50 +00:00
Committer: Damjan Marion
Commit:    dc9f5e9519 (parent f7964e5fa6)
3 changed files with 69 additions and 3 deletions

src/plugins/memif/device.c

@@ -67,12 +67,37 @@ format_memif_device (u8 * s, va_list * args)
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);
  memif_main_t *mm = &memif_main;
  memif_queue_t *mq;
  uword i;

  s = format (s, "MEMIF interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
      memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
      vec_foreach_index (i, mif->tx_queues)
        {
          mq = vec_elt_at_index (mif->tx_queues, i);
          s = format (s, "\n%U master-to-slave ring %u", format_white_space,
                      indent + 4, i);
          s = format (s, "\n%U packets sent: %u", format_white_space,
                      indent + 6, mq->n_packets);
          s = format (s, "\n%U no tx slot: %u", format_white_space, indent + 6,
                      mq->no_free_tx);
          s = format (s, "\n%U max no tx slot: %u", format_white_space,
                      indent + 6, mq->max_no_free_tx);
        }
      vec_foreach_index (i, mif->rx_queues)
        {
          mq = vec_elt_at_index (mif->rx_queues, i);
          s = format (s, "\n%U slave-to-master ring %u", format_white_space,
                      indent + 4, i);
          s = format (s, "\n%U packets received: %u", format_white_space,
                      indent + 6, mq->n_packets);
        }
    }
  return s;
}
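
With verbose output enabled, the format function above appends the new
per-ring counters. An illustrative rendering (the counter values below
are hypothetical):

  MEMIF interface
    instance 0
    master-to-slave ring 0
      packets sent: 1500000
      no tx slot: 12
      max no tx slot: 4
    slave-to-master ring 0
      packets received: 1499988

In VPP this output typically surfaces via "show hardware-interfaces
<name>" (an assumption about the CLI path; the device format hook shown
here is what renders it).
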
@@ -111,12 +136,14 @@ memif_interface_tx_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  u16 head, tail;
  u64 local_n_packets = 0;

  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

retry:
  local_n_packets = 0;

  if (type == MEMIF_RING_S2M)
    {
@@ -226,8 +253,10 @@ retry:
      buffers++;
      n_left--;
      local_n_packets++;
    }

no_free_slots:
  mq->n_packets += local_n_packets;

  /* copy data */
  n_copy_op = vec_len (ptd->copy_ops);
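
All three tx variants in this file share the same accounting shape:
local_n_packets is reset at the retry label, incremented once per
enqueued packet, and folded into mq->n_packets at the no_free_slots
label, so each retry pass is counted exactly once and the shared
counter is written once per pass rather than once per packet. A
standalone model of that shape (tx_pass and its parameters are
hypothetical, for illustration only):

  #include <stdint.h>
  #include <stdio.h>

  /* models the accumulate-then-publish pattern from the tx paths */
  static void
  tx_pass (uint64_t *n_packets, int n_left, int free_slots, int n_retries)
  {
    uint64_t local_n_packets = 0;

  retry:
    local_n_packets = 0; /* reset so a retry pass is not double-counted */
    while (n_left && free_slots)
      {
        /* ... enqueue one buffer onto the shared ring ... */
        n_left--;
        free_slots--;
        local_n_packets++;
      }
    /* publish once per pass; one write instead of one per packet */
    *n_packets += local_n_packets;
    if (n_left && n_retries--)
      {
        free_slots = n_left; /* assume the peer drained some slots */
        goto retry;
      }
  }

  int
  main (void)
  {
    uint64_t n_packets = 0;
    tx_pass (&n_packets, 8, 4, 5);
    printf ("%llu\n", (unsigned long long) n_packets); /* prints 8 */
    return 0;
  }
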
@@ -291,8 +320,10 @@ memif_interface_tx_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
  int n_retries = 5;
  vlib_buffer_t *b0;
  u16 head, tail;
  u64 local_n_packets = 0;

retry:
  local_n_packets = 0;
  tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  slot = head = ring->head;
@@ -358,8 +389,10 @@ retry:
      /* next from */
      buffers++;
      n_left--;
      local_n_packets++;
    }

no_free_slots:
  mq->n_packets += local_n_packets;
  __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);
@@ -419,6 +452,7 @@ memif_interface_tx_dma_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
  memif_per_thread_data_t *ptd;
  memif_main_t *mm = &memif_main;
  u16 mif_id = mif - mm->interfaces;
  u64 local_n_packets = 0;

  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
@@ -450,6 +484,7 @@ retry:
  head = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
  mq->last_tail += tail - mq->last_tail;
  free_slots = head - mq->dma_tail;
  local_n_packets = 0;

  while (n_left && free_slots)
    {
@@ -543,8 +578,10 @@ retry:
      buffers++;
      n_left--;
      local_n_packets += 1;
    }

no_free_slots:
  mq->n_packets += local_n_packets;

  /* copy data */
  n_copy_op = vec_len (ptd->copy_ops);
@@ -676,8 +713,13 @@ VNET_DEVICE_CLASS_TX_FN (memif_device_class) (vlib_main_t * vm,
   clib_spinlock_unlock (&mq->lockp);

   if (n_left)
-    vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
-		      n_left);
+    {
+      vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
+			n_left);
+      mq->no_free_tx += n_left;
+      if (n_left > mq->max_no_free_tx)
+	mq->max_no_free_tx = n_left;
+    }

   if ((mq->ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
     {
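
The two new drop counters are complementary: no_free_tx accumulates
every packet that could not be enqueued, while max_no_free_tx records
the worst single-call burst, which is the more useful signal when
deciding whether log2_ring_size needs to grow. A hypothetical consumer
of the counters (print_tx_pressure and the stand-in struct below are
illustrative, not part of this change):

  #include <stdint.h>
  #include <stdio.h>

  /* stand-in for the stats fields added to memif_queue_t */
  typedef struct
  {
    uint64_t n_packets;      /* packets sent on this queue */
    uint64_t no_free_tx;     /* cumulative packets with no free slot */
    uint32_t max_no_free_tx; /* worst single-call burst */
  } queue_stats_t;

  static void
  print_tx_pressure (const queue_stats_t *st)
  {
    double offered = (double) (st->n_packets + st->no_free_tx);
    double drop_ratio = offered > 0 ? st->no_free_tx / offered : 0.0;
    printf ("drop-ratio %.4f worst-burst %u\n", drop_ratio,
            st->max_no_free_tx);
  }

  int
  main (void)
  {
    queue_stats_t st = { 100000, 250, 64 };
    print_tx_pressure (&st); /* drop-ratio 0.0025 worst-burst 64 */
    return 0;
  }
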
@@ -721,7 +763,23 @@ memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
 static void
 memif_clear_hw_interface_counters (u32 instance)
 {
-  /* Nothing for now */
+  memif_main_t *mm = &memif_main;
+  memif_queue_t *mq;
+  uword i;
+  memif_if_t *mif = pool_elt_at_index (mm->interfaces, instance);
+
+  vec_foreach_index (i, mif->tx_queues)
+    {
+      mq = vec_elt_at_index (mif->tx_queues, i);
+      mq->n_packets = 0;
+      mq->no_free_tx = 0;
+      mq->max_no_free_tx = 0;
+    }
+  vec_foreach_index (i, mif->rx_queues)
+    {
+      mq = vec_elt_at_index (mif->rx_queues, i);
+      mq->n_packets = 0;
+    }
 }
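
For context, this function is the memif device class clear-counters
hook, which is why zeroing the new fields here is enough for them to be
reset alongside the interface counters. A trimmed sketch of the
registration (the actual VNET_DEVICE_CLASS block in device.c carries
more fields than shown):

  VNET_DEVICE_CLASS (memif_device_class) = {
    .name = "memif",
    .format_device = format_memif_device,
    .clear_counters = memif_clear_hw_interface_counters,
    /* ... remaining fields elided ... */
  };
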
static clib_error_t *

src/plugins/memif/node.c

@@ -721,6 +721,7 @@ memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    thread_index, mif->sw_if_index, ptd->n_packets, ptd->n_rx_bytes);
  mq->n_packets += ptd->n_packets;

  /* refill ring with empty buffers */
refill:
@@ -981,6 +982,7 @@ memif_device_input_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
				    + VNET_INTERFACE_COUNTER_RX, thread_index,
				    mif->sw_if_index, n_rx_packets,
				    n_rx_bytes);
  mq->n_packets += n_rx_packets;

  /* refill ring with empty buffers */
refill:
@@ -1166,6 +1168,7 @@ CLIB_MARCH_FN (memif_dma_completion_cb, void, vlib_main_t *vm,
  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    thread_index, mif->sw_if_index, ptd->n_packets, ptd->n_rx_bytes);
  mq->n_packets += ptd->n_packets;

  mq->dma_info_head++;
  if (mq->dma_info_head == mq->dma_info_size)
src/plugins/memif/private.h

@@ -150,6 +150,11 @@ typedef struct
  u16 dma_info_size;
  u8 dma_info_full;

  /* packets received or sent */
  u64 n_packets;
  /* tx packets that found no free ring slot */
  u64 no_free_tx;
  /* largest number of packets that found no free slot in one call */
  u32 max_no_free_tx;

  /* interrupts */
  int int_fd;
  uword int_clib_file_index;