dev: interrupt mode support

Type: improvement
Change-Id: I4a0578598182339bcf76e6b01da76b590a06f773
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2023-11-03 13:47:05 +00:00
committed by Andrew Yourtchenko
parent 29d07dbef9
commit b8dd9815ef
10 changed files with 307 additions and 155 deletions

View File

@ -143,6 +143,7 @@ vnet_dev_api_create_port_if (vlib_main_t *vm,
vnet_dev_t *dev = vnet_dev_by_id (args->device_id);
vnet_dev_port_t *port = 0;
u16 n_threads = vlib_get_n_threads ();
int default_is_intr_mode;
log_debug (dev,
"create_port_if: device '%s' port %u intf_name '%s' num_rx_q %u "
@ -168,6 +169,13 @@ vnet_dev_api_create_port_if (vlib_main_t *vm,
if (port->interface_created)
return VNET_DEV_ERR_ALREADY_EXISTS;
default_is_intr_mode = (args->flags.e & VNET_DEV_PORT_F_INTERRUPT_MODE) != 0;
if (default_is_intr_mode && port->attr.caps.interrupt_mode == 0)
{
log_err (dev, "interrupt mode requested and port doesn't support it");
return VNET_DEV_ERR_NOT_SUPPORTED;
}
if (args->num_rx_queues)
{
if (args->num_rx_queues > port->attr.max_rx_queues)
@ -207,6 +215,7 @@ vnet_dev_api_create_port_if (vlib_main_t *vm,
port->intf.txq_sz = port->tx_queue_config.default_size;
clib_memcpy (port->intf.name, args->intf_name, sizeof (port->intf.name));
port->intf.default_is_intr_mode = default_is_intr_mode;
return vnet_dev_process_call_port_op (vm, port, vnet_dev_port_if_create);
}

View File

@ -110,7 +110,9 @@ typedef struct
_ (MAX_FRAME_SIZE) \
_ (CHANGE_PRIMARY_HW_ADDR) \
_ (ADD_SECONDARY_HW_ADDR) \
_ (REMOVE_SECONDARY_HW_ADDR)
_ (REMOVE_SECONDARY_HW_ADDR) \
_ (RXQ_INTR_MODE_ENABLE) \
_ (RXQ_INTR_MODE_DISABLE)
typedef enum
{
@ -124,12 +126,14 @@ typedef struct vnet_dev_port_cfg_change_req
{
vnet_dev_port_cfg_type_t type;
u8 validated : 1;
u8 all_queues : 1;
union
{
u8 promisc : 1;
vnet_dev_hw_addr_t addr;
u16 max_frame_size;
vnet_dev_queue_id_t queue_id;
};
} vnet_dev_port_cfg_change_req_t;
@ -221,6 +225,8 @@ typedef struct vnet_dev_rx_queue
u16 index;
vnet_dev_counter_main_t *counter_main;
CLIB_CACHE_LINE_ALIGN_MARK (runtime0);
vnet_dev_rx_queue_t *next_on_thread;
u8 interrupt_mode : 1;
u8 enabled : 1;
u8 started : 1;
u8 suspended : 1;
@ -230,7 +236,7 @@ typedef struct vnet_dev_rx_queue
vnet_dev_rx_queue_rt_req_t runtime_request;
CLIB_CACHE_LINE_ALIGN_MARK (runtime1);
vlib_buffer_template_t buffer_template;
CLIB_ALIGN_MARK (private_data, 16);
CLIB_CACHE_LINE_ALIGN_MARK (driver_data);
u8 data[];
} vnet_dev_rx_queue_t;
@ -296,6 +302,7 @@ typedef struct vnet_dev_port
u8 feature_arc_index;
u8 feature_arc : 1;
u8 redirect_to_node : 1;
u8 default_is_intr_mode : 1;
u32 tx_node_index;
u32 hw_if_index;
u32 sw_if_index;
@ -568,27 +575,12 @@ void vnet_dev_poll_port_add (vlib_main_t *, vnet_dev_port_t *, f64,
void vnet_dev_poll_port_remove (vlib_main_t *, vnet_dev_port_t *,
vnet_dev_port_op_no_rv_t *);
/* runtime.c */
typedef enum
{
VNET_DEV_RT_OP_TYPE_UNKNOWN,
VNET_DEV_RT_OP_TYPE_RX_QUEUE,
} __clib_packed vnet_dev_rt_op_type_t;
typedef enum
{
VNET_DEV_RT_OP_ACTION_UNKNOWN,
VNET_DEV_RT_OP_ACTION_START,
VNET_DEV_RT_OP_ACTION_STOP,
} __clib_packed vnet_dev_rt_op_action_t;
typedef struct
{
u16 thread_index;
u8 type : 4;
u8 action : 4;
u8 completed;
vnet_dev_rx_queue_t *rx_queue;
u8 in_order;
vnet_dev_port_t *port;
} vnet_dev_rt_op_t;
vnet_dev_rv_t vnet_dev_rt_exec_ops (vlib_main_t *, vnet_dev_t *,
@ -619,8 +611,7 @@ unformat_function_t unformat_vnet_dev_port_flags;
typedef struct
{
u8 n_rx_queues;
vnet_dev_rx_queue_t *rx_queues[4];
vnet_dev_rx_queue_t *first_rx_queue;
} vnet_dev_rx_node_runtime_t;
STATIC_ASSERT (sizeof (vnet_dev_rx_node_runtime_t) <=

View File

@ -60,6 +60,20 @@ vnet_dev_get_port_from_dev_instance (u32 dev_instance)
return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
}
/* Resolve the dev-infra port owning a hardware interface.
 * Returns 0 when the hw interface is not backed by the dev framework
 * (no port for its dev_instance, or the port's recorded hw_if_index
 * does not match, i.e. the mapping is stale). */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_from_hw_if_index (u32 hw_if_index)
{
  vnet_hw_interface_t *hi =
    vnet_get_hw_interface (vnet_get_main (), hw_if_index);
  vnet_dev_port_t *p = vnet_dev_get_port_from_dev_instance (hi->dev_instance);

  if (p == 0 || p->intf.hw_if_index != hw_if_index)
    return 0;

  return p;
}
static_always_inline vnet_dev_t *
vnet_dev_by_id (char *id)
{
@ -119,6 +133,26 @@ vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
return 0;
}
/* Find the rx queue on 'port' with the given driver queue id.
 * Returns 0 when no queue on this port carries that id. */
static_always_inline vnet_dev_rx_queue_t *
vnet_dev_port_get_rx_queue_by_id (vnet_dev_port_t *port,
				  vnet_dev_queue_id_t queue_id)
{
  vnet_dev_rx_queue_t *match = 0;

  foreach_vnet_dev_port_rx_queue (rxq, port)
    if (match == 0 && rxq->queue_id == queue_id)
      match = rxq;

  return match;
}
/* Find the tx queue on 'port' with the given driver queue id.
 * Returns 0 when no queue on this port carries that id. */
static_always_inline vnet_dev_tx_queue_t *
vnet_dev_port_get_tx_queue_by_id (vnet_dev_port_t *port,
				  vnet_dev_queue_id_t queue_id)
{
  vnet_dev_tx_queue_t *match = 0;

  foreach_vnet_dev_port_tx_queue (txq, port)
    if (match == 0 && txq->queue_id == queue_id)
      match = txq;

  return match;
}
static_always_inline void *
vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
{
@ -181,22 +215,24 @@ vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
return (void *) node->runtime_data;
}
static_always_inline vnet_dev_rx_queue_t **
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node)
{
vnet_dev_rx_node_runtime_t *rt = vnet_dev_get_rx_node_runtime (node);
return rt->rx_queues;
}
static_always_inline int
vnet_dev_rx_queue_runtime_update (vnet_dev_rx_queue_t *rxq)
static_always_inline vnet_dev_rx_queue_t *
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node,
vnet_dev_rx_queue_t *rxq)
{
vnet_dev_port_t *port;
vnet_dev_rx_queue_rt_req_t req;
int rv = 1;
if (rxq == 0)
rxq = vnet_dev_get_rx_node_runtime (node)->first_rx_queue;
else
next:
rxq = rxq->next_on_thread;
if (PREDICT_FALSE (rxq == 0))
return 0;
if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
return 1;
return rxq;
req.as_number =
__atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);
@ -215,15 +251,20 @@ vnet_dev_rx_queue_runtime_update (vnet_dev_rx_queue_t *rxq)
if (req.suspend_on)
{
rxq->suspended = 1;
rv = 0;
goto next;
}
if (req.suspend_off)
rxq->suspended = 0;
return rv;
return rxq;
}
#define foreach_vnet_dev_rx_queue_runtime(q, node) \
for (vnet_dev_rx_queue_t * (q) = \
foreach_vnet_dev_rx_queue_runtime_helper (node, 0); \
q; (q) = foreach_vnet_dev_rx_queue_runtime_helper (node, q))
static_always_inline void *
vnet_dev_get_rt_temp_space (vlib_main_t *vm)
{
@ -240,12 +281,4 @@ vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
*addr = ha;
}
#define foreach_vnet_dev_rx_queue_runtime(q, node) \
for (vnet_dev_rx_queue_t * \
*__qp = foreach_vnet_dev_rx_queue_runtime_helper (node), \
**__last = __qp + (vnet_dev_get_rx_node_runtime (node))->n_rx_queues, \
*(q) = *__qp; \
__qp < __last; __qp++, (q) = *__qp) \
if (vnet_dev_rx_queue_runtime_update (q))
#endif /* _VNET_DEV_FUNCS_H_ */

View File

@ -146,9 +146,10 @@ format_vnet_dev_rx_queue_info (u8 *s, va_list *args)
s = format (s, "Size is %u, buffer pool index is %u", rxq->size,
vnet_dev_get_rx_queue_buffer_pool_index (rxq));
s = format (s, "\n%UPolling thread is %u, %sabled, %sstarted",
s = format (s, "\n%UPolling thread is %u, %sabled, %sstarted, %s mode",
format_white_space, indent, rxq->rx_thread_index,
rxq->enabled ? "en" : "dis", rxq->started ? "" : "not-");
rxq->enabled ? "en" : "dis", rxq->started ? "" : "not-",
rxq->interrupt_mode ? "interrupt" : "polling");
return s;
}

View File

@ -173,7 +173,34 @@ clib_error_t *
vnet_dev_rx_mode_change_fn (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
vnet_hw_if_rx_mode mode)
{
return clib_error_return (0, "not supported");
vlib_main_t *vm = vlib_get_main ();
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
vnet_dev_port_t *port =
vnet_dev_get_port_from_dev_instance (hw->dev_instance);
vnet_dev_rv_t rv;
if (!port)
return clib_error_return (0, "not for us");
if (qid >= (vnet_dev_queue_id_t) ~0)
return clib_error_return (0, "not supported");
vnet_dev_port_cfg_change_req_t req = {
.type = mode == VNET_HW_IF_RX_MODE_POLLING ?
VNET_DEV_PORT_CFG_RXQ_INTR_MODE_DISABLE :
VNET_DEV_PORT_CFG_RXQ_INTR_MODE_ENABLE,
.queue_id = qid,
};
if ((rv = vnet_dev_port_cfg_change_req_validate (vm, port, &req)))
return vnet_dev_port_err (
vm, port, rv, "rx queue interupt mode enable/disable not supported");
if ((rv = vnet_dev_process_port_cfg_change_req (vm, port, &req)))
return vnet_dev_port_err (
vm, port, rv, "device failed to enable/disable queue interrupt mode");
return 0;
}
void

View File

@ -2,16 +2,15 @@
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include "vlib/pci/pci.h"
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/pci.h>
#include <vnet/dev/log.h>
#include <vlib/unix/unix.h>
VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
.class_name = "dev",
.subclass_name = "pci",
.default_syslog_level = VLIB_LOG_LEVEL_DEBUG,
};
static int
@ -317,6 +316,18 @@ vnet_dev_pci_msix_add_handler (vlib_main_t *vm, vnet_dev_t *dev,
return VNET_DEV_OK;
}
/* Re-assign which thread polls the file descriptor backing a PCI
 * MSI-X interrupt line of 'dev'.  Looks up the clib file index for
 * 'line' on the device's PCI handle and hands it to 'thread_index'. */
void
vnet_dev_pci_msix_set_polling_thread (vlib_main_t *vm, vnet_dev_t *dev,
				      u16 line, u16 thread_index)
{
  vlib_pci_dev_handle_t h = vnet_dev_get_pci_handle (dev);
  u32 file_index = vlib_pci_get_msix_file_index (vm, h, line);

  clib_file_set_polling_thread (&file_main, file_index, thread_index);
}
vnet_dev_rv_t
vnet_dev_pci_msix_remove_handler (vlib_main_t *vm, vnet_dev_t *dev, u16 first,
u16 count)

View File

@ -74,5 +74,7 @@ vnet_dev_rv_t vnet_dev_pci_msix_remove_handler (vlib_main_t *, vnet_dev_t *,
vnet_dev_rv_t vnet_dev_pci_msix_enable (vlib_main_t *, vnet_dev_t *, u16, u16);
vnet_dev_rv_t vnet_dev_pci_msix_disable (vlib_main_t *, vnet_dev_t *, u16,
u16);
void vnet_dev_pci_msix_set_polling_thread (vlib_main_t *, vnet_dev_t *, u16,
u16);
#endif /* _VNET_DEV_PCI_H_ */

View File

@ -122,20 +122,15 @@ vnet_dev_port_stop (vlib_main_t *vm, vnet_dev_port_t *port)
{
vnet_dev_t *dev = port->dev;
vnet_dev_rt_op_t *ops = 0;
u16 n_threads = vlib_get_n_threads ();
log_debug (dev, "stopping port %u", port->port_id);
foreach_vnet_dev_port_rx_queue (q, port)
if (q->started)
{
vnet_dev_rt_op_t op = {
.type = VNET_DEV_RT_OP_TYPE_RX_QUEUE,
.action = VNET_DEV_RT_OP_ACTION_STOP,
.thread_index = q->rx_thread_index,
.rx_queue = q,
};
vec_add1 (ops, op);
}
for (u16 i = 0; i < n_threads; i++)
{
vnet_dev_rt_op_t op = { .thread_index = i, .port = port };
vec_add1 (ops, op);
}
vnet_dev_rt_exec_ops (vm, dev, ops, vec_len (ops));
vec_free (ops);
@ -195,6 +190,7 @@ vnet_dev_port_start_all_tx_queues (vlib_main_t *vm, vnet_dev_port_t *port)
vnet_dev_rv_t
vnet_dev_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
{
u16 n_threads = vlib_get_n_threads ();
vnet_dev_t *dev = port->dev;
vnet_dev_rt_op_t *ops = 0;
vnet_dev_rv_t rv;
@ -211,17 +207,11 @@ vnet_dev_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
return rv;
}
foreach_vnet_dev_port_rx_queue (q, port)
if (q->enabled)
{
vnet_dev_rt_op_t op = {
.type = VNET_DEV_RT_OP_TYPE_RX_QUEUE,
.action = VNET_DEV_RT_OP_ACTION_START,
.thread_index = q->rx_thread_index,
.rx_queue = q,
};
vec_add1 (ops, op);
}
for (u16 i = 0; i < n_threads; i++)
{
vnet_dev_rt_op_t op = { .thread_index = i, .port = port };
vec_add1 (ops, op);
}
vnet_dev_rt_exec_ops (vm, dev, ops, vec_len (ops));
vec_free (ops);
@ -356,10 +346,24 @@ vnet_dev_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port,
{
vnet_dev_rv_t rv = VNET_DEV_OK;
vnet_dev_hw_addr_t *a;
vnet_dev_rx_queue_t *rxq = 0;
u8 enable = 0;
vnet_dev_port_validate (vm, port);
vnet_dev_port_cfg_change_req_validate (vm, port, req);
if (req->type == VNET_DEV_PORT_CFG_RXQ_INTR_MODE_ENABLE ||
req->type == VNET_DEV_PORT_CFG_RXQ_INTR_MODE_DISABLE)
{
if (req->all_queues == 0)
{
rxq = vnet_dev_port_get_rx_queue_by_id (port, req->queue_id);
if (rxq == 0)
return VNET_DEV_ERR_BUG;
}
}
if ((rv = vnet_dev_port_cfg_change_req_validate (vm, port, req)))
return rv;
if (port->port_ops.config_change)
rv = port->port_ops.config_change (vm, port, req);
@ -377,6 +381,43 @@ vnet_dev_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port,
port->promisc = req->promisc;
break;
case VNET_DEV_PORT_CFG_RXQ_INTR_MODE_ENABLE:
enable = 1;
case VNET_DEV_PORT_CFG_RXQ_INTR_MODE_DISABLE:
if (req->all_queues)
{
clib_bitmap_t *bmp = 0;
vnet_dev_rt_op_t *ops = 0;
u32 i;
foreach_vnet_dev_port_rx_queue (q, port)
{
q->interrupt_mode = enable;
bmp = clib_bitmap_set (bmp, q->rx_thread_index, 1);
}
clib_bitmap_foreach (i, bmp)
{
vnet_dev_rt_op_t op = { .port = port, .thread_index = i };
vec_add1 (ops, op);
}
vnet_dev_rt_exec_ops (vm, port->dev, ops, vec_len (ops));
clib_bitmap_free (bmp);
vec_free (ops);
}
else
{
rxq->interrupt_mode = enable;
vnet_dev_rt_exec_ops (vm, port->dev,
&(vnet_dev_rt_op_t){
.port = port,
.thread_index = rxq->rx_thread_index,
},
1);
}
break;
case VNET_DEV_PORT_CFG_CHANGE_PRIMARY_HW_ADDR:
clib_memcpy (&port->primary_hw_addr, &req->addr,
sizeof (vnet_dev_hw_addr_t));
@ -602,6 +643,7 @@ vnet_dev_port_if_create (vlib_main_t *vm, vnet_dev_port_t *port)
port->intf.sw_if_index;
/* poison to catch node not calling runtime update function */
q->next_index = ~0;
q->interrupt_mode = port->intf.default_is_intr_mode;
vnet_dev_rx_queue_rt_request (
vm, q, (vnet_dev_rx_queue_rt_req_t){ .update_next_index = 1 });
}

View File

@ -19,49 +19,36 @@ static vnet_dev_rt_op_t *rt_ops;
static void
_vnet_dev_rt_exec_op (vlib_main_t *vm, vnet_dev_rt_op_t *op)
{
if (op->type == VNET_DEV_RT_OP_TYPE_RX_QUEUE)
vnet_dev_port_t *port = op->port;
vnet_dev_rx_queue_t *previous = 0, *first = 0;
vnet_dev_rx_node_runtime_t *rtd;
vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
u32 node_index = port->intf.rx_node_index;
rtd = vlib_node_get_runtime_data (vm, node_index);
foreach_vnet_dev_port_rx_queue (q, port)
{
vnet_dev_rx_node_runtime_t *rtd;
vnet_dev_rx_queue_t *rxq = op->rx_queue;
u32 i, node_index = rxq->port->intf.rx_node_index;
if (q->rx_thread_index != vm->thread_index)
continue;
rtd = vlib_node_get_runtime_data (vm, node_index);
if (q->interrupt_mode == 0)
state = VLIB_NODE_STATE_POLLING;
else if (state != VLIB_NODE_STATE_POLLING)
state = VLIB_NODE_STATE_INTERRUPT;
if (op->action == VNET_DEV_RT_OP_ACTION_START)
{
for (i = 0; i < rtd->n_rx_queues; i++)
ASSERT (rtd->rx_queues[i] != op->rx_queue);
rtd->rx_queues[rtd->n_rx_queues++] = op->rx_queue;
}
q->next_on_thread = 0;
if (previous == 0)
first = q;
else
previous->next_on_thread = q;
else if (op->action == VNET_DEV_RT_OP_ACTION_STOP)
{
for (i = 0; i < rtd->n_rx_queues; i++)
if (rtd->rx_queues[i] == op->rx_queue)
break;
ASSERT (i < rtd->n_rx_queues);
rtd->n_rx_queues--;
for (; i < rtd->n_rx_queues; i++)
rtd->rx_queues[i] = rtd->rx_queues[i + 1];
}
if (rtd->n_rx_queues == 1)
vlib_node_set_state (vm, node_index, VLIB_NODE_STATE_POLLING);
else if (rtd->n_rx_queues == 0)
vlib_node_set_state (vm, node_index, VLIB_NODE_STATE_DISABLED);
__atomic_store_n (&op->completed, 1, __ATOMIC_RELEASE);
previous = q;
}
}
static int
_vnet_dev_rt_op_not_occured_before (vnet_dev_rt_op_t *first,
vnet_dev_rt_op_t *current)
{
for (vnet_dev_rt_op_t *op = first; op < current; op++)
if (op->rx_queue == current->rx_queue && op->completed == 0)
return 0;
return 1;
rtd->first_rx_queue = first;
vlib_node_set_state (vm, port->intf.rx_node_index, state);
__atomic_store_n (&op->completed, 1, __ATOMIC_RELEASE);
}
static uword
@ -69,25 +56,26 @@ vnet_dev_rt_mgmt_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_frame_t *frame)
{
u16 thread_index = vm->thread_index;
vnet_dev_rt_op_t *ops = __atomic_load_n (&rt_ops, __ATOMIC_ACQUIRE);
vnet_dev_rt_op_t *op;
int come_back = 0;
vnet_dev_rt_op_t *op, *ops = __atomic_load_n (&rt_ops, __ATOMIC_ACQUIRE);
u32 n_pending = 0;
uword rv = 0;
vec_foreach (op, ops)
if (op->thread_index == thread_index)
{
if (_vnet_dev_rt_op_not_occured_before (ops, op))
{
_vnet_dev_rt_exec_op (vm, op);
rv++;
}
else
come_back = 1;
}
{
if (!op->completed && op->thread_index == thread_index)
{
if (op->in_order == 1 && n_pending)
{
vlib_node_set_interrupt_pending (vm, node->node_index);
return rv;
}
_vnet_dev_rt_exec_op (vm, op);
rv++;
}
if (come_back)
vlib_node_set_interrupt_pending (vm, node->node_index);
if (op->completed == 0)
n_pending++;
}
return rv;
}
@ -99,25 +87,6 @@ VLIB_REGISTER_NODE (vnet_dev_rt_mgmt_node, static) = {
.state = VLIB_NODE_STATE_INTERRUPT,
};
u8 *
format_vnet_dev_mgmt_op (u8 *s, va_list *args)
{
vnet_dev_rt_op_t *op = va_arg (*args, vnet_dev_rt_op_t *);
char *types[] = {
[VNET_DEV_RT_OP_TYPE_RX_QUEUE] = "rx queue",
};
char *actions[] = {
[VNET_DEV_RT_OP_ACTION_START] = "start",
[VNET_DEV_RT_OP_ACTION_STOP] = "stop",
};
return format (s, "port %u %s %u %s on thread %u",
op->rx_queue->port->port_id, types[op->type],
op->rx_queue->queue_id, actions[op->action],
op->thread_index);
}
vnet_dev_rv_t
vnet_dev_rt_exec_ops (vlib_main_t *vm, vnet_dev_t *dev, vnet_dev_rt_op_t *ops,
u32 n_ops)
@ -129,23 +98,56 @@ vnet_dev_rt_exec_ops (vlib_main_t *vm, vnet_dev_t *dev, vnet_dev_rt_op_t *ops,
ASSERT (rt_ops == 0);
if (vlib_worker_thread_barrier_held ())
{
for (op = ops; op < (ops + n_ops); op++)
{
vlib_main_t *tvm = vlib_get_main_by_index (op->thread_index);
_vnet_dev_rt_exec_op (tvm, op);
log_debug (
dev,
"port %u rx node runtime update on thread %u executed locally",
op->port->port_id, op->thread_index);
}
return VNET_DEV_OK;
}
while (n_ops)
{
if (op->thread_index != vm->thread_index)
break;
_vnet_dev_rt_exec_op (vm, op);
log_debug (
dev, "port %u rx node runtime update on thread %u executed locally",
op->port->port_id, op->thread_index);
op++;
n_ops--;
}
if (n_ops == 0)
return VNET_DEV_OK;
for (op = ops; op < (ops + n_ops); op++)
{
vlib_main_t *tvm = vlib_get_main_by_index (op->thread_index);
if ((vlib_worker_thread_barrier_held ()) ||
(op->thread_index == vm->thread_index &&
_vnet_dev_rt_op_not_occured_before (ops, op)))
if (op->thread_index == vm->thread_index &&
(op->in_order == 0 || vec_len (remote_ops) == 0))
{
_vnet_dev_rt_exec_op (tvm, op);
log_debug (dev, "%U executed locally", format_vnet_dev_mgmt_op, op);
continue;
_vnet_dev_rt_exec_op (vm, op);
log_debug (dev,
"port %u rx node runtime update on thread "
"%u executed locally",
op->port->port_id, op->thread_index);
}
else
{
vec_add1 (remote_ops, *op);
log_debug (dev,
"port %u rx node runtime update on thread %u "
"enqueued for remote execution",
op->port->port_id, op->thread_index);
remote_bmp = clib_bitmap_set (remote_bmp, op->thread_index, 1);
}
vec_add1 (remote_ops, *op);
log_debug (dev, "%U enqueued for remote execution",
format_vnet_dev_mgmt_op, op);
remote_bmp = clib_bitmap_set (remote_bmp, op->thread_index, 1);
}
if (remote_ops == 0)
@ -164,7 +166,11 @@ vnet_dev_rt_exec_ops (vlib_main_t *vm, vnet_dev_t *dev, vnet_dev_rt_op_t *ops,
vec_foreach (op, remote_ops)
{
while (op->completed == 0)
CLIB_PAUSE ();
vlib_process_suspend (vm, 5e-5);
log_debug (
dev, "port %u rx node runtime update on thread %u executed locally",
op->port->port_id, op->thread_index);
}
__atomic_store_n (&rt_ops, 0, __ATOMIC_RELAXED);

View File

@ -54,6 +54,9 @@
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/interface/tx_queue_funcs.h>
#include <vnet/hash/hash.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/dev_funcs.h>
static int
compare_interface_names (void *a1, void *a2)
{
@ -1516,6 +1519,33 @@ set_hw_interface_change_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
clib_error_t *error = 0;
vnet_hw_interface_t *hw;
u32 *queue_indices = 0;
vnet_dev_port_t *port;
port = vnet_dev_get_port_from_hw_if_index (hw_if_index);
if (port)
{
vlib_main_t *vm = vlib_get_main ();
vnet_dev_rv_t rv;
vnet_dev_port_cfg_change_req_t req = {
.type = mode == VNET_HW_IF_RX_MODE_POLLING ?
VNET_DEV_PORT_CFG_RXQ_INTR_MODE_DISABLE :
VNET_DEV_PORT_CFG_RXQ_INTR_MODE_ENABLE,
.queue_id = queue_id_valid ? queue_id : 0,
.all_queues = queue_id_valid ? 0 : 1,
};
if ((rv = vnet_dev_port_cfg_change_req_validate (vm, port, &req)))
return vnet_dev_port_err (
vm, port, rv, "rx queue interupt mode enable/disable not supported");
if ((rv = vnet_dev_process_port_cfg_change_req (vm, port, &req)))
return vnet_dev_port_err (
vm, port, rv,
"device failed to enable/disable queue interrupt mode");
return 0;
}
hw = vnet_get_hw_interface (vnm, hw_if_index);