dev: new device driver infra

Type: feature
Change-Id: I20c56e0d3103624407f18365c2bc1273dea5c199
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2023-10-17 16:06:26 +00:00
parent d3ef00098c
commit 38c619115b
35 changed files with 5466 additions and 5 deletions

View File

@ -21,15 +21,22 @@ ForEachMacros:
- 'vec_foreach_pointer'
- 'vlib_foreach_rx_tx'
- 'foreach_int'
- 'foreach_pointer'
- 'foreach_vlib_main'
- 'foreach_set_bit_index'
- 'foreach_vlib_frame_bitmap_set_bit_index'
- 'FOREACH_ARRAY_ELT'
- 'RTE_ETH_FOREACH_DEV'
- 'foreach_vnet_dev_rx_queue_runtime'
- 'foreach_vnet_dev_counter'
- 'foreach_vnet_dev_port_rx_queue'
- 'foreach_vnet_dev_port_tx_queue'
- 'foreach_vnet_dev_port'
StatementMacros:
- 'CLIB_MULTIARCH_FN'
- 'VLIB_NODE_FN'
- 'VNET_DEV_NODE_FN'
- 'VNET_DEVICE_CLASS_TX_FN'
- '__clib_section'
- '__clib_aligned'

View File

@ -98,6 +98,11 @@ I: policer
M: Neale Ranns <neale@graphiant.com>
F: src/vnet/policer/
VNET New Device Drivers Infra
I: dev
M: Damjan Marion <damarion@cisco.com>
F: src/vnet/dev/
VNET Device Drivers
I: devices
Y: src/vnet/devices/pipe/FEATURE.yaml

View File

@ -885,6 +885,27 @@ vlib_pci_register_intx_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
return 0;
}
/* Unregister a previously registered legacy INTx interrupt handler.
   Safe to call when no handler was registered (no-op). */
clib_error_t *
vlib_pci_unregister_intx_handler (vlib_main_t *vm, vlib_pci_dev_handle_t h)
{
  linux_pci_device_t *p = linux_pci_get_device (h);
  linux_pci_irq_t *irq = &p->intx_irq;

  /* Nothing registered - nothing to tear down. */
  if (irq->intx_handler == 0)
    return 0;

  clib_file_del_by_index (&file_main, irq->clib_file_index);

  /* For VFIO devices the interrupt is delivered via an fd owned here,
     so it must be closed and invalidated as well. */
  if (p->type == LINUX_PCI_DEVICE_TYPE_VFIO)
    {
      close (irq->fd);
      irq->fd = -1;
    }

  irq->intx_handler = 0;

  return 0;
}
clib_error_t *
vlib_pci_register_msix_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
u32 start, u32 count,
@ -942,6 +963,33 @@ error:
return err;
}
/* Unregister MSI-X interrupt handlers for COUNT vectors starting at START.
   Only supported for VFIO-backed devices; vectors with no fd are skipped. */
clib_error_t *
vlib_pci_unregister_msix_handler (vlib_main_t *vm, vlib_pci_dev_handle_t h,
				  u32 start, u32 count)
{
  linux_pci_device_t *pd = linux_pci_get_device (h);

  if (pd->type != LINUX_PCI_DEVICE_TYPE_VFIO)
    return clib_error_return (0, "vfio driver is needed for MSI-X interrupt "
				 "support");

  for (u32 vec_idx = start; vec_idx < start + count; vec_idx++)
    {
      linux_pci_irq_t *irq = vec_elt_at_index (pd->msix_irqs, vec_idx);

      /* Vector was never armed - nothing to release. */
      if (irq->fd == -1)
	continue;

      clib_file_del_by_index (&file_main, irq->clib_file_index);
      close (irq->fd);
      irq->fd = -1;
    }

  return 0;
}
clib_error_t *
vlib_pci_enable_msix_irq (vlib_main_t * vm, vlib_pci_dev_handle_t h,
u16 start, u16 count)

View File

@ -240,11 +240,16 @@ clib_error_t *vlib_pci_register_intx_handler (vlib_main_t * vm,
vlib_pci_dev_handle_t h,
pci_intx_handler_function_t *
intx_handler);
clib_error_t *vlib_pci_unregister_intx_handler (vlib_main_t *vm,
vlib_pci_dev_handle_t h);
clib_error_t *vlib_pci_register_msix_handler (vlib_main_t * vm,
vlib_pci_dev_handle_t h,
u32 start, u32 count,
pci_msix_handler_function_t *
msix_handler);
clib_error_t *vlib_pci_unregister_msix_handler (vlib_main_t *vm,
vlib_pci_dev_handle_t h,
u32 start, u32 count);
clib_error_t *vlib_pci_enable_msix_irq (vlib_main_t * vm,
vlib_pci_dev_handle_t h, u16 start,
u16 count);

View File

@ -26,6 +26,19 @@ list(APPEND VNET_SOURCES
config.c
devices/devices.c
devices/netlink.c
dev/api.c
dev/cli.c
dev/config.c
dev/counters.c
dev/dev.c
dev/error.c
dev/format.c
dev/handlers.c
dev/pci.c
dev/port.c
dev/process.c
dev/queue.c
dev/runtime.c
error.c
flow/flow.c
flow/flow_cli.c
@ -59,6 +72,7 @@ list(APPEND VNET_HEADERS
config.h
devices/devices.h
devices/netlink.h
dev/dev.h
flow/flow.h
global_funcs.h
interface/rx_queue_funcs.h

241
src/vnet/dev/api.c Normal file
View File

@ -0,0 +1,241 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include "vppinfra/pool.h"
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/counters.h>
#include <vnet/dev/log.h>
#include <vnet/dev/api.h>
VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
.class_name = "dev",
.subclass_name = "api",
};
/* Return 1 if SIZE satisfies every constraint in queue config C
   (min/max bounds, power-of-two, multiplier), otherwise 0. */
static int
_vnet_dev_queue_size_validate (u32 size, vnet_dev_queue_config_t c)
{
  int ok = size >= c.min_size && size <= c.max_size;

  if (c.size_is_power_of_two)
    ok = ok && count_set_bits (size) == 1;

  if (c.multiplier)
    ok = ok && size % c.multiplier == 0;

  return ok;
}
/* Attach a device to the dev infra: resolve its bus from the device id,
   probe registered drivers until one claims the device, allocate the
   device object and run its init op on the per-device process.
   Returns VNET_DEV_OK or a vnet_dev_rv_t error code. */
vnet_dev_rv_t
vnet_dev_api_attach (vlib_main_t *vm, vnet_dev_api_attach_args_t *args)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  vnet_dev_t *dev = 0;
  vnet_dev_rv_t rv = VNET_DEV_OK;
  vnet_dev_bus_t *bus;
  vnet_dev_driver_t *driver;
  void *bus_dev_info = 0;
  u8 *dev_desc = 0;

  log_debug (0, "%s driver %s flags '%U' args '%v'", args->device_id,
	     args->driver_name, format_vnet_dev_flags, &args->flags,
	     args->args);

  /* Device ids are unique; a second attach of the same id is rejected. */
  if (vnet_dev_by_id (args->device_id))
    return VNET_DEV_ERR_ALREADY_IN_USE;

  bus = vnet_dev_find_device_bus (vm, args->device_id);
  if (!bus)
    {
      log_err (dev, "unknown bus");
      rv = VNET_DEV_ERR_INVALID_BUS;
      goto done;
    }

  /* Bus-specific device info, freed in the 'done' path below. */
  bus_dev_info = vnet_dev_get_device_info (vm, args->device_id);
  if (!bus_dev_info)
    {
      log_err (dev, "invalid or unsupported device id");
      rv = VNET_DEV_ERR_INVALID_DEVICE_ID;
      goto done;
    }

  /* Offer the device to each registered driver (optionally restricted to
     args->driver_name); the first probe returning a description wins. */
  vec_foreach (driver, dm->drivers)
    {
      if (args->driver_name[0] &&
	  strcmp (args->driver_name, driver->registration->name))
	continue;
      if (driver->ops.probe &&
	  (dev_desc = driver->ops.probe (vm, bus->index, bus_dev_info)))
	break;
    }

  if (!dev_desc)
    {
      log_err (dev, "driver not available for %s", args->device_id);
      rv = VNET_DEV_ERR_DRIVER_NOT_AVAILABLE;
      goto done;
    }

  dev = vnet_dev_alloc (vm, args->device_id, driver);
  if (!dev)
    {
      log_err (dev, "dev alloc failed for %s", args->device_id);
      rv = VNET_DEV_ERR_BUG;
      goto done;
    }
  dev->description = dev_desc;

  /* Stats polling defaults to on; VNET_DEV_F_NO_STATS opts out. */
  if ((args->flags.e & VNET_DEV_F_NO_STATS) == 0)
    dev->poll_stats = 1;

  log_debug (0, "found '%v'", dev->description);

  rv = vnet_dev_process_call_op (vm, dev, vnet_dev_init);

done:
  if (bus_dev_info)
    bus->ops.free_device_info (vm, bus_dev_info);

  /* Any failure after allocation frees the device via its process. */
  if (rv != VNET_DEV_OK && dev)
    vnet_dev_process_call_op_no_rv (vm, dev, vnet_dev_free);

  return rv;
}
/* Detach a device previously attached with vnet_dev_api_attach().
   The detach op runs on the device's own process node. */
vnet_dev_rv_t
vnet_dev_api_detach (vlib_main_t *vm, vnet_dev_api_detach_args_t *args)
{
  vnet_dev_t *dev = vnet_dev_by_id (args->device_id);

  log_debug (dev, "detach");

  if (dev == 0)
    return VNET_DEV_ERR_NOT_FOUND;

  return vnet_dev_process_call_op_no_rv (vm, dev, vnet_dev_detach);
}
/* Reset a device.  Reset support is optional per driver; devices whose
   driver does not provide a reset op report NOT_SUPPORTED. */
vnet_dev_rv_t
vnet_dev_api_reset (vlib_main_t *vm, vnet_dev_api_reset_args_t *args)
{
  vnet_dev_t *dev = vnet_dev_by_id (args->device_id);

  /* Fix: log message was copy-pasted as "detach". */
  log_debug (dev, "reset");

  if (!dev)
    return VNET_DEV_ERR_NOT_FOUND;

  /* Fix: test was inverted - NOT_SUPPORTED must be returned when the
     driver does NOT implement the reset op. */
  if (dev->ops.reset == 0)
    return VNET_DEV_ERR_NOT_SUPPORTED;

  return vnet_dev_process_call_op (vm, dev, vnet_dev_reset);
}
/* Create a VPP interface for a device port.  Requested queue counts and
   sizes are validated against the port's limits; unspecified values fall
   back to defaults (1 rx queue, one tx queue per thread, and the queue
   config default sizes).  The interface itself is created on the device
   process via vnet_dev_port_if_create. */
vnet_dev_rv_t
vnet_dev_api_create_port_if (vlib_main_t *vm,
			     vnet_dev_api_create_port_if_args_t *args)
{
  vnet_dev_t *dev = vnet_dev_by_id (args->device_id);
  vnet_dev_port_t *port = 0;
  u16 n_threads = vlib_get_n_threads ();

  log_debug (dev,
	     "create_port_if: device '%s' port %u intf_name '%s' num_rx_q %u "
	     "num_tx_q %u rx_q_sz %u tx_q_sz %u, flags '%U' args '%v'",
	     args->device_id, args->port_id, args->intf_name,
	     args->num_rx_queues, args->num_tx_queues, args->rx_queue_size,
	     args->tx_queue_size, format_vnet_dev_port_flags, &args->flags,
	     args->args);

  if (dev == 0)
    return VNET_DEV_ERR_NOT_FOUND;

  foreach_vnet_dev_port (p, dev)
    if (p->port_id == args->port_id)
      {
	port = p;
	break;
      }

  if (!port)
    return VNET_DEV_ERR_INVALID_DEVICE_ID;

  if (port->interface_created)
    return VNET_DEV_ERR_ALREADY_EXISTS;

  if (args->num_rx_queues)
    {
      if (args->num_rx_queues > port->attr.max_rx_queues)
	return VNET_DEV_ERR_INVALID_NUM_RX_QUEUES;
      port->intf.num_rx_queues = args->num_rx_queues;
    }
  else
    /* Fix: default rx queue count was clamped against max_tx_queues. */
    port->intf.num_rx_queues = clib_min (port->attr.max_rx_queues, 1);

  if (args->num_tx_queues)
    {
      if (args->num_tx_queues > port->attr.max_tx_queues)
	return VNET_DEV_ERR_INVALID_NUM_TX_QUEUES;
      port->intf.num_tx_queues = args->num_tx_queues;
    }
  else
    /* Default: one tx queue per worker thread, capped by hardware. */
    port->intf.num_tx_queues = clib_min (port->attr.max_tx_queues, n_threads);

  if (args->rx_queue_size)
    {
      if (!_vnet_dev_queue_size_validate (args->rx_queue_size,
					  port->rx_queue_config))
	return VNET_DEV_ERR_INVALID_RX_QUEUE_SIZE;
      port->intf.rxq_sz = args->rx_queue_size;
    }
  else
    port->intf.rxq_sz = port->rx_queue_config.default_size;

  if (args->tx_queue_size)
    {
      if (!_vnet_dev_queue_size_validate (args->tx_queue_size,
					  port->tx_queue_config))
	return VNET_DEV_ERR_INVALID_TX_QUEUE_SIZE;
      port->intf.txq_sz = args->tx_queue_size;
    }
  else
    port->intf.txq_sz = port->tx_queue_config.default_size;

  clib_memcpy (port->intf.name, args->intf_name, sizeof (port->intf.name));

  return vnet_dev_process_call_port_op (vm, port, vnet_dev_port_if_create);
}
/* Remove the VPP interface identified by sw_if_index, after verifying it
   is really backed by a dev-infra port (sw -> hw -> dev_instance -> port
   must all resolve and agree). */
vnet_dev_rv_t
vnet_dev_api_remove_port_if (vlib_main_t *vm,
			     vnet_dev_api_remove_port_if_args_t *args)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_sw_interface_t *si;
  vnet_hw_interface_t *hi;
  vnet_dev_port_t *port;

  si = vnet_get_sw_interface_or_null (vnm, args->sw_if_index);
  if (!si)
    return VNET_DEV_ERR_UNKNOWN_INTERFACE;

  hi = vnet_get_hw_interface_or_null (vnm, si->hw_if_index);
  if (!hi)
    return VNET_DEV_ERR_UNKNOWN_INTERFACE;

  /* dev_instance must belong to the dev infra's port pool. */
  if (pool_is_free_index (dm->ports_by_dev_instance, hi->dev_instance))
    return VNET_DEV_ERR_UNKNOWN_INTERFACE;

  port = vnet_dev_get_port_from_dev_instance (hi->dev_instance);

  /* Guard against a stale/mismatched hw interface mapping. */
  if (port->intf.hw_if_index != si->hw_if_index)
    return VNET_DEV_ERR_UNKNOWN_INTERFACE;

  return vnet_dev_process_call_port_op (vm, port, vnet_dev_port_if_remove);
}

62
src/vnet/dev/api.h Normal file
View File

@ -0,0 +1,62 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_API_H_
#define _VNET_DEV_API_H_

#include <vppinfra/clib.h>
#include <vnet/vnet.h>
#include <vnet/dev/types.h>

/* Arguments for attaching a device to the dev infrastructure. */
typedef struct
{
  vnet_dev_device_id_t device_id;     /* bus-prefixed device id */
  vnet_dev_driver_name_t driver_name; /* optional; empty = any driver */
  vnet_dev_flags_t flags;
  u8 *args; /* driver-specific argument string (vector) */
} vnet_dev_api_attach_args_t;

vnet_dev_rv_t vnet_dev_api_attach (vlib_main_t *,
				   vnet_dev_api_attach_args_t *);

/* Arguments for detaching a previously attached device. */
typedef struct
{
  vnet_dev_device_id_t device_id;
} vnet_dev_api_detach_args_t;
vnet_dev_rv_t vnet_dev_api_detach (vlib_main_t *,
				   vnet_dev_api_detach_args_t *);

/* Arguments for resetting an attached device. */
typedef struct
{
  vnet_dev_device_id_t device_id;
} vnet_dev_api_reset_args_t;
vnet_dev_rv_t vnet_dev_api_reset (vlib_main_t *, vnet_dev_api_reset_args_t *);

/* Arguments for creating a VPP interface on a device port.
   Zero queue counts/sizes mean "use port defaults". */
typedef struct
{
  vnet_dev_device_id_t device_id;
  vnet_dev_if_name_t intf_name;
  u16 num_rx_queues;
  u16 num_tx_queues;
  u16 rx_queue_size;
  u16 tx_queue_size;
  vnet_dev_port_id_t port_id;
  vnet_dev_port_flags_t flags;
  u8 *args; /* driver-specific argument string (vector) */
} vnet_dev_api_create_port_if_args_t;

vnet_dev_rv_t
vnet_dev_api_create_port_if (vlib_main_t *,
			     vnet_dev_api_create_port_if_args_t *);

/* Arguments for removing a port interface by sw_if_index. */
typedef struct
{
  u32 sw_if_index;
} vnet_dev_api_remove_port_if_args_t;

vnet_dev_rv_t
vnet_dev_api_remove_port_if (vlib_main_t *,
			     vnet_dev_api_remove_port_if_args_t *);

#endif /* _VNET_DEV_API_H_ */

315
src/vnet/dev/cli.c Normal file

File diff suppressed because it is too large Load Diff

182
src/vnet/dev/config.c Normal file
View File

@ -0,0 +1,182 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include "vppinfra/error.h"
#include "vppinfra/pool.h"
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/api.h>
#include <vnet/dev/log.h>
VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
.class_name = "dev",
.subclass_name = "config",
};
/* Parse one "port N { ... }" sub-block of a startup-config device stanza
   into ARGS (name, queue counts/sizes, port flags).  Returns an error on
   the first unrecognized token. */
static clib_error_t *
vnet_dev_config_one_interface (vlib_main_t *vm, unformat_input_t *input,
			       vnet_dev_api_create_port_if_args_t *args)
{
  clib_error_t *err = 0;

  log_debug (0, "port %u %U", args->port_id, format_unformat_input, input);

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      u32 n;
      if (unformat (input, "name %U", unformat_c_string_array, args->intf_name,
		    sizeof (args->intf_name)))
	;
      else if (unformat (input, "num-rx-queues %u", &n))
	args->num_rx_queues = n;
      else if (unformat (input, "num-tx-queues %u", &n))
	args->num_tx_queues = n;
      else if (unformat (input, "rx-queue-size %u", &n))
	args->rx_queue_size = n;
      else if (unformat (input, "tx-queue-size %u", &n))
	args->tx_queue_size = n;
      else if (unformat (input, "flags %U", unformat_vnet_dev_port_flags,
			 &args->flags))
	;
      else
	{
	  err = clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, input);
	  break;
	}
    }
  return err;
}
/* Parse one "dev <id> { ... }" startup-config stanza: collect attach args
   and any per-port interface args, then attach the device and create the
   requested interfaces.  Parsing is completed before any API call is made. */
static clib_error_t *
vnet_dev_config_one_device (vlib_main_t *vm, unformat_input_t *input,
			    char *device_id)
{
  log_debug (0, "device %s %U", device_id, format_unformat_input, input);
  clib_error_t *err = 0;
  vnet_dev_api_attach_args_t args = {};
  vnet_dev_api_create_port_if_args_t *if_args_vec = 0, *if_args;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      unformat_input_t sub_input;
      u32 n;

      if (unformat (input, "driver %U", unformat_c_string_array,
		    args.driver_name, sizeof (args.driver_name)))
	;
      else if (unformat (input, "flags %U", unformat_vnet_dev_flags,
			 &args.flags))
	;
      else if (unformat (input, "port %u %U", &n, unformat_vlib_cli_sub_input,
			 &sub_input))
	{
	  /* NOTE(review): this declaration shadows the outer 'if_args';
	     harmless here since the outer one is only used below. */
	  vnet_dev_api_create_port_if_args_t *if_args;
	  vec_add2 (if_args_vec, if_args, 1);
	  if_args->port_id = n;
	  err = vnet_dev_config_one_interface (vm, &sub_input, if_args);
	  unformat_free (&sub_input);
	  if (err)
	    break;
	}
      else
	{
	  err = clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, input);
	  break;
	}
    }

  if (err == 0)
    {
      vnet_dev_rv_t rv;

      clib_memcpy (args.device_id, device_id, sizeof (args.device_id));
      rv = vnet_dev_api_attach (vm, &args);

      if (rv == VNET_DEV_OK)
	{
	  /* Create interfaces in config order; stop at first failure. */
	  vec_foreach (if_args, if_args_vec)
	    {
	      clib_memcpy (if_args->device_id, device_id,
			   sizeof (if_args->device_id));
	      rv = vnet_dev_api_create_port_if (vm, if_args);
	      if (rv != VNET_DEV_OK)
		break;
	    }

	  if (rv != VNET_DEV_OK)
	    err = clib_error_return (0, "error: %U for device '%s'",
				     format_vnet_dev_rv, rv, device_id);
	}
    }

  vec_free (if_args_vec);
  return err;
}
/* One-shot process node: parses the buffered "devices" startup config
   (saved by devices_config()) once at runtime, then disables itself and
   recycles its node index for reuse by device processes. */
uword
dev_config_process_node_fn (vlib_main_t *vm, vlib_node_runtime_t *rt,
			    vlib_frame_t *f)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  unformat_input_t input;
  clib_error_t *err = 0;

  if (dm->startup_config == 0)
    return 0;

  /* Take ownership of the config vector; it is consumed exactly once. */
  unformat_init_vector (&input, dm->startup_config);
  dm->startup_config = 0;

  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      unformat_input_t sub_input;
      vnet_dev_device_id_t device_id;

      if (unformat (&input, "dev %U %U", unformat_c_string_array, device_id,
		    sizeof (device_id), unformat_vlib_cli_sub_input,
		    &sub_input))
	{
	  err = vnet_dev_config_one_device (vm, &sub_input, device_id);
	  unformat_free (&sub_input);
	  if (err)
	    break;
	}
      else
	{
	  err = clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, input);
	  break;
	}
    }

  unformat_free (&input);

  /* Work is done - retire this process node and recycle its index. */
  vlib_node_set_state (vm, rt->node_index, VLIB_NODE_STATE_DISABLED);
  vlib_node_rename (vm, rt->node_index, "deleted-%u", rt->node_index);
  vec_add1 (dm->free_process_node_indices, rt->node_index);
  return 0;
}
VLIB_REGISTER_NODE (dev_config_process_node) = {
.function = dev_config_process_node_fn,
.type = VLIB_NODE_TYPE_PROCESS,
.name = "dev-config",
};
/* "devices" startup-config handler: buffer the raw config characters so
   the dev-config process node can parse them after init completes. */
static clib_error_t *
devices_config (vlib_main_t *vm, unformat_input_t *input)
{
  vnet_dev_main_t *dm = &vnet_dev_main;

  for (uword ch = unformat_get_input (input); ch != UNFORMAT_END_OF_INPUT;
       ch = unformat_get_input (input))
    vec_add1 (dm->startup_config, ch);

  return 0;
}
VLIB_CONFIG_FUNCTION (devices_config, "devices");

132
src/vnet/dev/counters.c Normal file
View File

@ -0,0 +1,132 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/counters.h>
#include <vnet/dev/log.h>
#include <vnet/interface/rx_queue_funcs.h>
VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
.class_name = "dev",
.subclass_name = "counters",
};
/* Allocate a counter main holding N_COUNTERS counters copied from the
   template COUNTERS, with an optional printf-style description.
   Counter data/start vectors are cache-line aligned.  Caller owns the
   result and frees it with vnet_dev_counters_free(). */
vnet_dev_counter_main_t *
vnet_dev_counters_alloc (vlib_main_t *vm, vnet_dev_counter_t *counters,
			 u16 n_counters, char *fmt, ...)
{
  vnet_dev_counter_t *c;
  vnet_dev_counter_main_t *cm;
  u32 alloc_sz;

  /* cm ends in a flexible array member - header and counter table are
     allocated in one block. */
  alloc_sz = sizeof (*cm) + n_counters * sizeof (*c);
  cm = clib_mem_alloc_aligned (alloc_sz, CLIB_CACHE_LINE_BYTES);
  clib_memset (cm, 0, sizeof (*cm));
  cm->n_counters = n_counters;

  if (fmt && strlen (fmt))
    {
      va_list va;
      va_start (va, fmt);
      cm->desc = va_format (0, fmt, &va);
      va_end (va);
    }

  for (u32 i = 0; i < n_counters; i++)
    {
      cm->counters[i] = counters[i];
      /* index ties each counter back to its slot in the data vectors */
      cm->counters[i].index = i;
    }

  vec_validate_aligned (cm->counter_data, n_counters - 1,
			CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (cm->counter_start, n_counters - 1,
			CLIB_CACHE_LINE_BYTES);
  return cm;
}
/* Clear all counters: the current value becomes the new baseline
   (counter_start) and the reported value resets to zero. */
void
vnet_dev_counters_clear (vlib_main_t *vm, vnet_dev_counter_main_t *cm)
{
  for (int idx = 0; idx < cm->n_counters; idx++)
    {
      cm->counter_start[idx] = cm->counter_data[idx];
      cm->counter_data[idx] = 0;
    }
}
/* Release a counter main and every vector it owns. */
void
vnet_dev_counters_free (vlib_main_t *vm, vnet_dev_counter_main_t *cm)
{
  vec_free (cm->counter_start);
  vec_free (cm->counter_data);
  vec_free (cm->desc);
  clib_mem_free (cm);
}
/* Format a human-readable counter name.  Standard counters map to fixed
   strings; vendor counters use their own name plus optional unit and
   direction suffixes. */
u8 *
format_vnet_dev_counter_name (u8 *s, va_list *va)
{
  vnet_dev_counter_t *c = va_arg (*va, vnet_dev_counter_t *);

  char *std_counters[] = {
    [VNET_DEV_CTR_TYPE_RX_BYTES] = "total bytes received",
    [VNET_DEV_CTR_TYPE_TX_BYTES] = "total bytes transmitted",
    [VNET_DEV_CTR_TYPE_RX_PACKETS] = "total packets received",
    [VNET_DEV_CTR_TYPE_TX_PACKETS] = "total packets transmitted",
    [VNET_DEV_CTR_TYPE_RX_DROPS] = "total drops received",
    [VNET_DEV_CTR_TYPE_TX_DROPS] = "total drops transmitted",
  };

  char *directions[] = {
    [VNET_DEV_CTR_DIR_RX] = "received",
    [VNET_DEV_CTR_DIR_TX] = "sent",
  };
  char *units[] = {
    [VNET_DEV_CTR_UNIT_BYTES] = "bytes",
    [VNET_DEV_CTR_UNIT_PACKETS] = "packets",
  };

  if (c->type == VNET_DEV_CTR_TYPE_VENDOR)
    {
      s = format (s, "%s", c->name);

      /* Suffixes are optional: tables are sparse, gaps are NULL. */
      if (c->unit < ARRAY_LEN (units) && units[c->unit])
	s = format (s, " %s", units[c->unit]);

      if (c->dir < ARRAY_LEN (directions) && directions[c->dir])
	s = format (s, " %s", directions[c->dir]);
    }
  else if (c->type < ARRAY_LEN (std_counters) && std_counters[c->type])
    s = format (s, "%s", std_counters[c->type]);
  else
    ASSERT (0); /* unknown counter type - template bug */

  return s;
}
/* Format all counters in a counter main, one per line, aligned to the
   current indent.  Zero-valued counters are skipped unless
   a->show_zero_counters is set. */
u8 *
format_vnet_dev_counters (u8 *s, va_list *va)
{
  vnet_dev_format_args_t *a = va_arg (*va, vnet_dev_format_args_t *);
  vnet_dev_counter_main_t *cm = va_arg (*va, vnet_dev_counter_main_t *);
  u32 line = 0, indent = format_get_indent (s);

  foreach_vnet_dev_counter (c, cm)
    {
      if (a->show_zero_counters == 0 && cm->counter_data[c->index] == 0)
	continue;

      /* newline + indent before every line except the first */
      if (line++)
	s = format (s, "\n%U", format_white_space, indent);

      s = format (s, "%-45U%lu", format_vnet_dev_counter_name, c,
		  cm->counter_data[c->index]);
    }

  return s;
}

128
src/vnet/dev/counters.h Normal file
View File

@ -0,0 +1,128 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_COUNTERS_H_
#define _VNET_DEV_COUNTERS_H_

#include <vnet/dev/dev.h>

/* Traffic direction a counter refers to (NA = direction-less). */
typedef enum
{
  VNET_DEV_CTR_DIR_NA,
  VNET_DEV_CTR_DIR_RX,
  VNET_DEV_CTR_DIR_TX,
} __clib_packed vnet_dev_counter_direction_t;

/* Well-known counter types; VENDOR is a driver-defined named counter. */
typedef enum
{
  VNET_DEV_CTR_TYPE_RX_BYTES,
  VNET_DEV_CTR_TYPE_RX_PACKETS,
  VNET_DEV_CTR_TYPE_RX_DROPS,
  VNET_DEV_CTR_TYPE_TX_BYTES,
  VNET_DEV_CTR_TYPE_TX_PACKETS,
  VNET_DEV_CTR_TYPE_TX_DROPS,
  VNET_DEV_CTR_TYPE_VENDOR,
} __clib_packed vnet_dev_counter_type_t;

/* Unit of the counted quantity. */
typedef enum
{
  VNET_DEV_CTR_UNIT_NA,
  VNET_DEV_CTR_UNIT_BYTES,
  VNET_DEV_CTR_UNIT_PACKETS,
} __clib_packed vnet_dev_counter_unit_t;

/* Description of one counter.  'index' is its slot in the owning
   vnet_dev_counter_main_t, assigned by vnet_dev_counters_alloc(). */
typedef struct vnet_dev_counter
{
  char name[24];  /* display name; used for VENDOR counters */
  uword user_data; /* opaque driver cookie */
  vnet_dev_counter_type_t type;
  vnet_dev_counter_direction_t dir;
  vnet_dev_counter_unit_t unit;
  u16 index;
} vnet_dev_counter_t;

/* Counter table plus per-counter value and baseline vectors.  The
   counters[] flexible array member trails the struct allocation. */
typedef struct vnet_dev_counter_main
{
  u8 *desc;	     /* formatted description (vector) */
  u64 *counter_data; /* current values, cache-line aligned vector */
  u64 *counter_start; /* baseline captured at last clear */
  u16 n_counters;
  vnet_dev_counter_t counters[];
} vnet_dev_counter_main_t;

/* Initializer helpers for counter templates; extra designators may be
   appended through __VA_ARGS__. */
#define VNET_DEV_CTR_RX_BYTES(p, ...)                                         \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_RX_BYTES, .dir = VNET_DEV_CTR_DIR_RX,           \
    .unit = VNET_DEV_CTR_UNIT_BYTES, .user_data = (p), __VA_ARGS__            \
  }
#define VNET_DEV_CTR_TX_BYTES(p, ...)                                         \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_TX_BYTES, .dir = VNET_DEV_CTR_DIR_TX,           \
    .unit = VNET_DEV_CTR_UNIT_BYTES, .user_data = (p), __VA_ARGS__            \
  }
#define VNET_DEV_CTR_RX_PACKETS(p, ...)                                       \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_RX_PACKETS, .dir = VNET_DEV_CTR_DIR_RX,         \
    .unit = VNET_DEV_CTR_UNIT_PACKETS, .user_data = (p), __VA_ARGS__          \
  }
#define VNET_DEV_CTR_TX_PACKETS(p, ...)                                       \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_TX_PACKETS, .dir = VNET_DEV_CTR_DIR_TX,         \
    .unit = VNET_DEV_CTR_UNIT_PACKETS, .user_data = (p), __VA_ARGS__          \
  }
#define VNET_DEV_CTR_RX_DROPS(p, ...)                                         \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_RX_DROPS, .dir = VNET_DEV_CTR_DIR_RX,           \
    .unit = VNET_DEV_CTR_UNIT_PACKETS, .user_data = (p), __VA_ARGS__          \
  }
#define VNET_DEV_CTR_TX_DROPS(p, ...)                                         \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_TX_DROPS, .dir = VNET_DEV_CTR_DIR_TX,           \
    .unit = VNET_DEV_CTR_UNIT_PACKETS, .user_data = (p), __VA_ARGS__          \
  }
#define VNET_DEV_CTR_VENDOR(p, d, u, n, ...)                                  \
  {                                                                           \
    .type = VNET_DEV_CTR_TYPE_VENDOR, .user_data = (p), .name = n,            \
    .dir = VNET_DEV_CTR_DIR_##d, .unit = VNET_DEV_CTR_UNIT_##u, __VA_ARGS__   \
  }

vnet_dev_counter_main_t *vnet_dev_counters_alloc (vlib_main_t *,
						  vnet_dev_counter_t *, u16,
						  char *, ...);
void vnet_dev_counters_clear (vlib_main_t *, vnet_dev_counter_main_t *);
void vnet_dev_counters_free (vlib_main_t *, vnet_dev_counter_main_t *);

format_function_t format_vnet_dev_counters;
format_function_t format_vnet_dev_counters_all;

/* Recover the owning counter main from a counter pointer: step back
   'index' elements to the table start, then back over the header. */
static_always_inline vnet_dev_counter_main_t *
vnet_dev_counter_get_main (vnet_dev_counter_t *counter)
{
  return (vnet_dev_counter_main_t *) ((u8 *) (counter - counter->index) -
				      STRUCT_OFFSET_OF (
					vnet_dev_counter_main_t, counters));
}

/* Add VAL to a counter (for event/delta style updates). */
static_always_inline void
vnet_dev_counter_value_add (vlib_main_t *vm, vnet_dev_counter_t *counter,
			    u64 val)
{
  vnet_dev_counter_main_t *cm = vnet_dev_counter_get_main (counter);
  cm->counter_data[counter->index] += val;
}

/* Set a counter from an absolute hw value, relative to the baseline
   captured at the last clear. */
static_always_inline void
vnet_dev_counter_value_update (vlib_main_t *vm, vnet_dev_counter_t *counter,
			       u64 val)
{
  vnet_dev_counter_main_t *cm = vnet_dev_counter_get_main (counter);
  cm->counter_data[counter->index] = val - cm->counter_start[counter->index];
}

/* Iterate over every counter in a counter main (no-op when cm is NULL). */
#define foreach_vnet_dev_counter(c, cm)                                       \
  if (cm)                                                                     \
    for (typeof (*(cm)->counters) *(c) = (cm)->counters;                      \
	 (c) < (cm)->counters + (cm)->n_counters; (c)++)

#endif /* _VNET_DEV_COUNTERS_H_ */

456
src/vnet/dev/dev.c Normal file

File diff suppressed because it is too large Load Diff

701
src/vnet/dev/dev.h Normal file

File diff suppressed because it is too large Load Diff

251
src/vnet/dev/dev_funcs.h Normal file
View File

@ -0,0 +1,251 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_FUNCS_H_
#define _VNET_DEV_FUNCS_H_

#include <vppinfra/clib.h>
#include <vnet/dev/dev.h>

/* Driver-private per-device data (trailing 'data' member). */
static_always_inline void *
vnet_dev_get_data (vnet_dev_t *dev)
{
  return dev->data;
}

/* Inverse of vnet_dev_get_data(): recover the device from its data ptr. */
static_always_inline vnet_dev_t *
vnet_dev_from_data (void *p)
{
  return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
}

/* Driver-private per-port data. */
static_always_inline void *
vnet_dev_get_port_data (vnet_dev_port_t *port)
{
  return port->data;
}

/* Driver-private per-rx-queue data. */
static_always_inline void *
vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
{
  return rxq->data;
}

/* Driver-private per-tx-queue data. */
static_always_inline void *
vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
{
  return txq->data;
}

/* Look up a device by its pool index (must be a valid index). */
static_always_inline vnet_dev_t *
vnet_dev_get_by_index (u32 index)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->devices, index)[0];
}

/* Look up a port by its pool index within a device. */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
{
  return pool_elt_at_index (dev->ports, index)[0];
}

/* Map a vnet dev_instance to its port; returns 0 for a free index. */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_from_dev_instance (u32 dev_instance)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
    return 0;
  return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
}

/* Look up a device by its string id; returns 0 if not found. */
static_always_inline vnet_dev_t *
vnet_dev_by_id (char *id)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  uword *p = hash_get (dm->device_index_by_id, id);
  if (p)
    return *pool_elt_at_index (dm->devices, p[0]);

  return 0;
}

/* DMA address of P: the VA itself when the device does VA DMA,
   otherwise the physmem physical address. */
static_always_inline uword
vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
{
  return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
}

/* Bus-specific data attached to the device. */
static_always_inline void *
vnet_dev_get_bus_data (vnet_dev_t *dev)
{
  return (void *) dev->bus_data;
}

/* Bus object this device is attached to. */
static_always_inline vnet_dev_bus_t *
vnet_dev_get_bus (vnet_dev_t *dev)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->buses, dev->bus_index);
}

/* Assert we are running on this device's process node, on the main
   thread - the dev infra's single-writer execution model. */
static_always_inline void
vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
{
  ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

/* Same check as vnet_dev_validate(), via the port's device. */
static_always_inline void
vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
{
  ASSERT (port->dev->process_node_index ==
	  vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

/* sw_if_index of the interface created for this port. */
static_always_inline u32
vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
{
  return port->intf.sw_if_index;
}

/* Find a port by its hardware port id; returns 0 if not found. */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
{
  foreach_vnet_dev_port (p, dev)
    if (p->port_id == port_id)
      return p;
  return 0;
}

/* Allocate a zeroed, cache-line-aligned object of SZ bytes plus DATA_SZ
   trailing driver-private bytes. */
static_always_inline void *
vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
{
  void *p;
  sz += data_sz;
  sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
  p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
  clib_memset (p, 0, sz);
  return p;
}

/* Spin-lock the tx queue when it is shared between threads
   (lock_needed).  Test-and-test-and-set with pause to limit contention. */
static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      free = 0;
    }
}

/* Release the tx queue lock taken by vnet_dev_tx_queue_lock_if_needed(). */
static_always_inline void
vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
{
  if (!txq->lock_needed)
    return;
  __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
}

/* Buffer pool the rx queue allocates from (via its buffer template). */
static_always_inline u8
vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
{
  return rxq->buffer_template.buffer_pool_index;
}

/* Post an asynchronous runtime-change request to an rx queue; picked up
   by vnet_dev_rx_queue_runtime_update() on the polling thread. */
static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
			      vnet_dev_rx_queue_rt_req_t req)
{
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
		     __ATOMIC_RELEASE);
}

/* Typed view of an rx node's runtime data. */
static_always_inline vnet_dev_rx_node_runtime_t *
vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

/* Typed view of a tx node's runtime data. */
static_always_inline vnet_dev_tx_node_runtime_t *
vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

/* Rx queue array for the node - used by the foreach macro below. */
static_always_inline vnet_dev_rx_queue_t **
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node)
{
  vnet_dev_rx_node_runtime_t *rt = vnet_dev_get_rx_node_runtime (node);
  return rt->rx_queues;
}

/* Apply any pending runtime-change requests to an rx queue.  Returns 1
   if the queue should be polled, 0 if it is (now) suspended. */
static_always_inline int
vnet_dev_rx_queue_runtime_update (vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;
  int rv = 1;

  /* Fast path: no pending requests. */
  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return 1;

  /* Atomically consume all pending request bits. */
  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      rxq->suspended = 1;
      rv = 0;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rv;
}

/* Per-thread scratch area for rx/tx node runtime use. */
static_always_inline void *
vnet_dev_get_rt_temp_space (vlib_main_t *vm)
{
  return vnet_dev_main.runtime_temp_spaces +
	 ((uword) vm->thread_index
	  << vnet_dev_main.log2_runtime_temp_space_sz);
}

/* Set a hw address from a raw ethernet MAC, zeroing the rest. */
static_always_inline void
vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
{
  vnet_dev_hw_addr_t ha = {};
  clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
  *addr = ha;
}

/* Iterate over an rx node's queues, skipping suspended ones and applying
   pending runtime updates on the fly. */
#define foreach_vnet_dev_rx_queue_runtime(q, node)                            \
  for (vnet_dev_rx_queue_t *                                                  \
	 *__qp = foreach_vnet_dev_rx_queue_runtime_helper (node),             \
	 **__last = __qp + (vnet_dev_get_rx_node_runtime (node))->n_rx_queues, \
	 *(q) = *__qp;                                                        \
       __qp < __last; __qp++, (q) = *__qp)                                    \
    if (vnet_dev_rx_queue_runtime_update (q))

#endif /* _VNET_DEV_FUNCS_H_ */

29
src/vnet/dev/error.c Normal file
View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/counters.h>
/* Build a clib error "<device> port <id>: <rv description> (<msg>)" from
   a vnet_dev_rv_t plus printf-style context.  Returns 0 when RV is
   VNET_DEV_OK so callers can pass success straight through. */
clib_error_t *
vnet_dev_port_err (vlib_main_t *vm, vnet_dev_port_t *port, vnet_dev_rv_t rv,
		   char *fmt, ...)
{
  clib_error_t *err;
  va_list va;
  u8 *s;

  if (rv == VNET_DEV_OK)
    return 0;

  va_start (va, fmt);
  s = va_format (0, fmt, &va);
  va_end (va);

  err = clib_error_return (0, "%s port %u: %U (%v)", port->dev->device_id,
			   port->port_id, format_vnet_dev_rv, rv, s);
  vec_free (s);
  return err;
}

42
src/vnet/dev/errors.h Normal file
View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_ERRORS_H_
#define _VNET_DEV_ERRORS_H_

/* Return-value codes for the dev infra: each entry expands to an enum
   symbol and its user-visible description string.
   Fix: "no such enty" -> "no such entry" (typo in user-facing message). */
#define foreach_vnet_dev_rv_type                                              \
  _ (ALREADY_EXISTS, "already exists")                                        \
  _ (ALREADY_IN_USE, "already in use")                                        \
  _ (BUFFER_ALLOC_FAIL, "packet buffer allocation failure")                   \
  _ (BUG, "bug")                                                              \
  _ (BUS, "bus error")                                                        \
  _ (DEVICE_NO_REPLY, "no reply from device")                                 \
  _ (DMA_MEM_ALLOC_FAIL, "DMA memory allocation error")                       \
  _ (DRIVER_NOT_AVAILABLE, "driver not available")                            \
  _ (INVALID_BUS, "invalid bus")                                              \
  _ (INVALID_DATA, "invalid data")                                            \
  _ (INVALID_DEVICE_ID, "invalid device id")                                  \
  _ (INVALID_NUM_RX_QUEUES, "invalid number of rx queues")                    \
  _ (INVALID_NUM_TX_QUEUES, "invalid number of tx queues")                    \
  _ (INVALID_PORT_ID, "invalid port id")                                      \
  _ (INVALID_RX_QUEUE_SIZE, "invalid rx queue size")                          \
  _ (INVALID_TX_QUEUE_SIZE, "invalid tx queue size")                          \
  _ (INVALID_VALUE, "invalid value")                                          \
  _ (INTERNAL, "internal error")                                              \
  _ (NOT_FOUND, "not found")                                                  \
  _ (NOT_READY, "not ready")                                                  \
  _ (NOT_SUPPORTED, "not supported")                                          \
  _ (NO_CHANGE, "no change")                                                  \
  _ (NO_AVAIL_QUEUES, "no queues available")                                  \
  _ (NO_SUCH_ENTRY, "no such entry")                                          \
  _ (PORT_STARTED, "port started")                                            \
  _ (PROCESS_REPLY, "dev process reply error")                                \
  _ (RESOURCE_NOT_AVAILABLE, "resource not available")                        \
  _ (TIMEOUT, "timeout")                                                      \
  _ (UNKNOWN_INTERFACE, "unknown interface")                                  \
  _ (UNSUPPORTED_CONFIG, "unsupported config")                                \
  _ (UNSUPPORTED_DEVICE, "unsupported device")                                \
  _ (UNSUPPORTED_DEVICE_VER, "unsupported device version")

#endif /* _VNET_DEV_ERRORS_H_ */

405
src/vnet/dev/format.c Normal file

File diff suppressed because it is too large Load Diff

225
src/vnet/dev/handlers.c Normal file
View File

@ -0,0 +1,225 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/counters.h>
#include <vnet/dev/log.h>
#include <vnet/flow/flow.h>
VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
.class_name = "dev",
.subclass_name = "handler",
};
/* vnet hw-interface callback: change a port's max frame size (MTU).
   The request is validated first; NO_CHANGE counts as success. */
clib_error_t *
vnet_dev_port_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hw,
				  u32 frame_size)
{
  vlib_main_t *vm = vlib_get_main ();
  vnet_dev_port_t *p = vnet_dev_get_port_from_dev_instance (hw->dev_instance);
  vnet_dev_rv_t rv;
  vnet_dev_port_cfg_change_req_t req = {
    .type = VNET_DEV_PORT_CFG_MAX_FRAME_SIZE,
    .max_frame_size = frame_size,
  };

  log_debug (p->dev, "size %u", frame_size);

  rv = vnet_dev_port_cfg_change_req_validate (vm, p, &req);

  /* Already at the requested size - nothing to do. */
  if (rv == VNET_DEV_ERR_NO_CHANGE)
    return 0;

  if (rv != VNET_DEV_OK)
    return vnet_dev_port_err (vm, p, rv,
			      "new max frame size is not valid for port");

  /* Apply the change via the device process. */
  if ((rv = vnet_dev_process_port_cfg_change_req (vm, p, &req)) != VNET_DEV_OK)
    return vnet_dev_port_err (vm, p, rv,
			      "device failed to change max frame size");

  return 0;
}
/* Ethernet flag-change callback: toggles promiscuous mode on the port.
   Returns 0 on success (or no change needed), ~0 on any failure or
   unsupported flag combination. */
u32
vnet_dev_port_eth_flag_change (vnet_main_t *vnm, vnet_hw_interface_t *hw,
			       u32 flags)
{
  vlib_main_t *vm = vlib_get_main ();
  vnet_dev_port_t *p = vnet_dev_get_port_from_dev_instance (hw->dev_instance);
  vnet_dev_rv_t rv;
  vnet_dev_port_cfg_change_req_t req = {
    .type = VNET_DEV_PORT_CFG_PROMISC_MODE,
  };

  switch (flags)
    {
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      log_debug (p->dev, "promisc off");
      break;
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      log_debug (p->dev, "promisc on");
      req.promisc = 1;
      break;
    default:
      /* Only promisc on/off is handled here. */
      return ~0;
    }

  rv = vnet_dev_port_cfg_change_req_validate (vm, p, &req);
  if (rv == VNET_DEV_ERR_NO_CHANGE)
    return 0;

  if (rv != VNET_DEV_OK)
    return ~0;

  rv = vnet_dev_process_port_cfg_change_req (vm, p, &req);
  if (rv == VNET_DEV_OK || rv == VNET_DEV_ERR_NO_CHANGE)
    return 0;
  return ~0;
}
/* MAC address change callback: update the port's primary hw address.
   Validated first; NO_CHANGE counts as success. */
clib_error_t *
vnet_dev_port_mac_change (vnet_hw_interface_t *hi, const u8 *old,
			  const u8 *new)
{
  vlib_main_t *vm = vlib_get_main ();
  vnet_dev_port_t *p = vnet_dev_get_port_from_dev_instance (hi->dev_instance);
  vnet_dev_rv_t rv;
  vnet_dev_port_cfg_change_req_t req = {
    .type = VNET_DEV_PORT_CFG_CHANGE_PRIMARY_HW_ADDR,
  };

  vnet_dev_set_hw_addr_eth_mac (&req.addr, new);

  log_debug (p->dev, "new mac %U", format_vnet_dev_hw_addr, &req.addr);

  rv = vnet_dev_port_cfg_change_req_validate (vm, p, &req);
  if (rv == VNET_DEV_ERR_NO_CHANGE)
    return 0;

  if (rv != VNET_DEV_OK)
    return vnet_dev_port_err (vm, p, rv, "hw address is not valid for port");

  if ((rv = vnet_dev_process_port_cfg_change_req (vm, p, &req)) != VNET_DEV_OK)
    return vnet_dev_port_err (vm, p, rv, "device failed to change hw address");

  return 0;
}
clib_error_t *
vnet_dev_add_del_mac_address (vnet_hw_interface_t *hi, const u8 *address,
			      u8 is_add)
{
  /* Add (is_add != 0) or remove a secondary hw (MAC) address on the
   * port backing this hardware interface. */
  vlib_main_t *vm = vlib_get_main ();
  vnet_dev_port_t *p = vnet_dev_get_port_from_dev_instance (hi->dev_instance);
  vnet_dev_rv_t rv;
  vnet_dev_port_cfg_change_req_t req = {
    .type = is_add ? VNET_DEV_PORT_CFG_ADD_SECONDARY_HW_ADDR :
		     VNET_DEV_PORT_CFG_REMOVE_SECONDARY_HW_ADDR,
  };

  vnet_dev_set_hw_addr_eth_mac (&req.addr, address);

  /* fixed unbalanced '(' in the log message */
  log_debug (p->dev, "received (addr %U is_add %u)", format_vnet_dev_hw_addr,
	     &req.addr, is_add);

  rv = vnet_dev_port_cfg_change_req_validate (vm, p, &req);
  if (rv != VNET_DEV_OK)
    return vnet_dev_port_err (vm, p, rv,
			      "provided secondary hw addresses cannot "
			      "be added/removed");

  if ((rv = vnet_dev_process_port_cfg_change_req (vm, p, &req)) != VNET_DEV_OK)
    return vnet_dev_port_err (
      vm, p, rv, "device failed to add/remove secondary hw address");

  return 0;
}
int
vnet_dev_flow_ops_fn (vnet_main_t *vnm, vnet_flow_dev_op_t op,
		      u32 dev_instance, u32 flow_index, uword *private_data)
{
  /* Flow offload is not implemented by the dev infra; log the attempt
   * and report the operation as unsupported. */
  vnet_dev_port_t *port = vnet_dev_get_port_from_dev_instance (dev_instance);

  log_warn (port->dev, "unsupported request for flow_ops received");
  return VNET_FLOW_ERROR_NOT_SUPPORTED;
}
clib_error_t *
vnet_dev_interface_set_rss_queues (vnet_main_t *vnm, vnet_hw_interface_t *hi,
				   clib_bitmap_t *bitmap)
{
  /* RSS queue selection is not implemented by the dev infra. */
  vnet_dev_port_t *p = vnet_dev_get_port_from_dev_instance (hi->dev_instance);
  /* fixed copy/pasted message that incorrectly referred to flow_ops */
  log_warn (p->dev, "unsupported request for rss queues received");
  return vnet_error (VNET_ERR_UNSUPPORTED, "not implemented");
}
void
vnet_dev_clear_hw_interface_counters (u32 instance)
{
  /* Dispatch the clear-counters port op to the dev process context. */
  vlib_main_t *vm = vlib_get_main ();
  vnet_dev_port_t *port = vnet_dev_get_port_from_dev_instance (instance);

  vnet_dev_process_call_port_op_no_rv (vm, port,
				       vnet_dev_port_clear_counters);
}
clib_error_t *
vnet_dev_rx_mode_change_fn (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
			    vnet_hw_if_rx_mode mode)
{
  /* Per-queue rx mode changes are not supported by the dev infra;
   * always reject the request. */
  return clib_error_return (0, "not supported");
}
void
vnet_dev_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
				  u32 node_index)
{
  /* Redirect rx traffic of the interface to `node_index`, or restore the
   * default next node when node_index == ~0. Requests a runtime update of
   * every rx queue when the effective next index changes and no feature
   * arc is in the way. */
  vlib_main_t *vm = vlib_get_main ();
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_dev_port_t *port =
    vnet_dev_get_port_from_dev_instance (hw->dev_instance);
  int runtime_update = 0;

  if (node_index == ~0)
    {
      port->intf.redirect_to_node_next_index = 0;
      if (port->intf.feature_arc == 0)
	{
	  port->intf.rx_next_index =
	    vnet_dev_default_next_index_by_port_type[port->attr.type];
	  runtime_update = 1;
	}
      port->intf.redirect_to_node = 0;
    }
  else
    {
      u16 next_index = vlib_node_add_next (vlib_get_main (),
					   port_rx_eth_node.index, node_index);
      port->intf.redirect_to_node_next_index = next_index;
      if (port->intf.feature_arc == 0)
	{
	  port->intf.rx_next_index = next_index;
	  runtime_update = 1;
	}
      port->intf.redirect_to_node = 1;
    }
  /* NOTE(review): this unconditional assignment overrides the
   * feature_arc-guarded values set above, and stores the raw node_index
   * rather than the next index obtained from vlib_node_add_next —
   * confirm this is intentional. */
  port->intf.rx_next_index =
    node_index == ~0 ?
      vnet_dev_default_next_index_by_port_type[port->attr.type] :
      node_index;

  if (runtime_update)
    {
      foreach_vnet_dev_port_rx_queue (rxq, port)
	vnet_dev_rx_queue_rt_request (
	  vm, rxq, (vnet_dev_rx_queue_rt_req_t){ .update_next_index = 1 });
      /* fixed typos: "chgange" -> "change", "reditect" -> "redirect" */
      log_debug (port->dev, "runtime update requested due to change in "
		 "redirect-to-node configuration");
    }
}

23
src/vnet/dev/log.h Normal file
View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_LOG_H_
#define _VNET_DEV_LOG_H_

/* Logging helpers for dev infra code. Each translation unit that includes
 * this header is expected to register its own file-local `dev_log` class
 * (VLIB_REGISTER_LOG_CLASS) which these macros reference. Every message
 * is prefixed with device identification rendered by format_vnet_dev_log. */
format_function_t format_vnet_dev_log;

/* debug level also records the calling function name (__func__); the
 * other levels pass 0 and emit only the device prefix */
#define log_debug(dev, f, ...)                                                \
  vlib_log (VLIB_LOG_LEVEL_DEBUG, dev_log.class, "%U" f, format_vnet_dev_log, \
	    dev, __func__, ##__VA_ARGS__)
#define log_notice(dev, f, ...)                                               \
  vlib_log (VLIB_LOG_LEVEL_NOTICE, dev_log.class, "%U" f,                     \
	    format_vnet_dev_log, dev, 0, ##__VA_ARGS__)
#define log_warn(dev, f, ...)                                                 \
  vlib_log (VLIB_LOG_LEVEL_WARNING, dev_log.class, "%U" f,                    \
	    format_vnet_dev_log, dev, 0, ##__VA_ARGS__)
#define log_err(dev, f, ...)                                                  \
  vlib_log (VLIB_LOG_LEVEL_ERR, dev_log.class, "%U" f, format_vnet_dev_log,   \
	    dev, 0, ##__VA_ARGS__)

#endif /* _VNET_DEV_LOG_H_ */

10
src/vnet/dev/mgmt.h Normal file
View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_MGMT_H_
#define _VNET_DEV_MGMT_H_

/* Placeholder header for dev management declarations; currently it only
 * pulls in the basic clib types. */
#include <vppinfra/clib.h>

#endif /* _VNET_DEV_MGMT_H_ */

447
src/vnet/dev/pci.c Normal file

File diff suppressed because it is too large Load Diff

78
src/vnet/dev/pci.h Normal file
View File

@ -0,0 +1,78 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_PCI_H_
#define _VNET_DEV_PCI_H_
#include <vppinfra/clib.h>
#include <vlib/pci/pci.h>
#include <vnet/dev/dev.h>
/* Callback types invoked when a PCI INTx / MSI-X interrupt fires. */
typedef void (vnet_dev_pci_intx_handler_fn_t) (vlib_main_t *vm,
					       vnet_dev_t *dev);
typedef void (vnet_dev_pci_msix_handler_fn_t) (vlib_main_t *vm,
					       vnet_dev_t *dev, u16 line);

/* Device identity as discovered on the PCI bus. */
typedef struct
{
  vlib_pci_addr_t addr; /* domain:bus:device.function address */
  u16 vendor_id;
  u16 device_id;
  u8 revision;
} vnet_dev_bus_pci_device_info_t;

/* Per-device PCI bus state, kept in vnet_dev_t.bus_data (see the
 * accessors below). */
typedef struct
{
  u8 pci_handle_valid : 1; /* set once `handle` is usable */
  u16 n_msix_int;	   /* number of MSI-X interrupts */
  vlib_pci_addr_t addr;
  vlib_pci_dev_handle_t handle;
  vnet_dev_pci_intx_handler_fn_t *intx_handler;
  /* presumably indexed by MSI-X line — TODO confirm against pci.c */
  vnet_dev_pci_msix_handler_fn_t **msix_handlers;
} vnet_dev_bus_pci_device_data_t;
static_always_inline vnet_dev_bus_pci_device_data_t *
vnet_dev_get_bus_pci_device_data (vnet_dev_t *dev)
{
  /* bus_data is opaque storage owned by the PCI bus implementation */
  return (vnet_dev_bus_pci_device_data_t *) dev->bus_data;
}
static_always_inline vlib_pci_dev_handle_t
vnet_dev_get_pci_handle (vnet_dev_t *dev)
{
  /* vlib PCI handle of the device, valid once pci_handle_valid is set */
  return vnet_dev_get_bus_pci_device_data (dev)->handle;
}
static_always_inline vlib_pci_addr_t
vnet_dev_get_pci_addr (vnet_dev_t *dev)
{
  /* PCI address of the device as recorded at discovery time */
  return vnet_dev_get_bus_pci_device_data (dev)->addr;
}
/* Number of MSI-X interrupts available on the device.
 * fix: return type was vlib_pci_dev_handle_t (a device handle), but the
 * function returns the u16 n_msix_int count. */
static_always_inline u16
vnet_dev_get_pci_n_msix_interrupts (vnet_dev_t *dev)
{
  return vnet_dev_get_bus_pci_device_data (dev)->n_msix_int;
}
/* config space / BAR access */
vnet_dev_rv_t vnet_dev_pci_read_config_header (vlib_main_t *, vnet_dev_t *,
					       vlib_pci_config_hdr_t *);
vnet_dev_rv_t vnet_dev_pci_map_region (vlib_main_t *, vnet_dev_t *, u8,
				       void **);
/* device control */
vnet_dev_rv_t vnet_dev_pci_function_level_reset (vlib_main_t *, vnet_dev_t *);
vnet_dev_rv_t vnet_dev_pci_bus_master_enable (vlib_main_t *, vnet_dev_t *);
vnet_dev_rv_t vnet_dev_pci_bus_master_disable (vlib_main_t *, vnet_dev_t *);
/* interrupt handler management; the u16 pairs are (start line, count) —
 * TODO confirm against pci.c */
vnet_dev_rv_t vnet_dev_pci_intx_add_handler (vlib_main_t *, vnet_dev_t *,
					     vnet_dev_pci_intx_handler_fn_t *);
vnet_dev_rv_t vnet_dev_pci_intx_remove_handler (vlib_main_t *, vnet_dev_t *);
vnet_dev_rv_t vnet_dev_pci_msix_add_handler (vlib_main_t *, vnet_dev_t *,
					     vnet_dev_pci_msix_handler_fn_t *,
					     u16, u16);
vnet_dev_rv_t vnet_dev_pci_msix_remove_handler (vlib_main_t *, vnet_dev_t *,
						u16, u16);
vnet_dev_rv_t vnet_dev_pci_msix_enable (vlib_main_t *, vnet_dev_t *, u16, u16);
vnet_dev_rv_t vnet_dev_pci_msix_disable (vlib_main_t *, vnet_dev_t *, u16,
					 u16);
#endif /* _VNET_DEV_PCI_H_ */

678
src/vnet/dev/port.c Normal file

File diff suppressed because it is too large Load Diff

474
src/vnet/dev/process.c Normal file

File diff suppressed because it is too large Load Diff

10
src/vnet/dev/process.h Normal file
View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_PROCESS_H_
#define _VNET_DEV_PROCESS_H_
#include <vppinfra/clib.h>
#endif /* _VNET_DEV_PROCESS_H_ */

227
src/vnet/dev/queue.c Normal file
View File

@ -0,0 +1,227 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/counters.h>
#include <vnet/dev/log.h>
/* Log class used by the log.h helper macros in this file.
 * NOTE(review): subclass_name "error" looks like a copy/paste from
 * error-handling code; "queue" would match this file's content —
 * confirm intent before changing. */
VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
  .class_name = "dev",
  .subclass_name = "error",
};
void
vnet_dev_rx_queue_free (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  /* Release an rx queue: driver free hook, counters, pool slot and the
   * queue memory itself. */
  vnet_dev_port_t *port = rxq->port;

  log_debug (port->dev, "queue %u", rxq->queue_id);

  if (port->rx_queue_ops.free)
    port->rx_queue_ops.free (vm, rxq);

  vnet_dev_rx_queue_free_counters (vm, rxq);
  pool_put_index (port->rx_queues, rxq->index);
  clib_mem_free (rxq);
}
vnet_dev_rv_t
vnet_dev_rx_queue_alloc (vlib_main_t *vm, vnet_dev_port_t *port,
			 u16 queue_size)
{
  /* Allocate and initialize an rx queue on `port`, assign it to an rx
   * thread (round-robin across workers), pre-build its buffer template
   * and call the driver alloc hook. Returns VNET_DEV_OK on success. */
  vnet_dev_main_t *dm = &vnet_dev_main;
  vnet_dev_rx_queue_t *rxq, **qp;
  vnet_dev_t *dev = port->dev;
  vnet_dev_rv_t rv = VNET_DEV_OK;
  u16 n_threads = vlib_get_n_threads ();
  u8 buffer_pool_index;

  vnet_dev_port_validate (vm, port);

  log_debug (dev, "port %u queue_size %u", port->port_id, queue_size);

  if (pool_elts (port->rx_queues) == port->attr.max_rx_queues)
    return VNET_DEV_ERR_NO_AVAIL_QUEUES;

  /* fix: was sizeof (vnet_dev_port_t) — allocate the rx queue struct,
   * not the port struct */
  rxq = vnet_dev_alloc_with_data (sizeof (vnet_dev_rx_queue_t),
				  port->rx_queue_config.data_size);
  pool_get (port->rx_queues, qp);
  qp[0] = rxq;
  rxq->enabled = 1;
  rxq->port = port;
  rxq->size = queue_size;
  rxq->index = qp - port->rx_queues;

  /* default queue id - can be changed by driver */
  rxq->queue_id = qp - port->rx_queues;
  ASSERT (rxq->queue_id < port->attr.max_rx_queues);

  /* round-robin queues over worker threads, skipping main (thread 0) */
  if (n_threads > 1)
    {
      rxq->rx_thread_index = dm->next_rx_queue_thread++;
      if (dm->next_rx_queue_thread >= n_threads)
	dm->next_rx_queue_thread = 1;
    }

  /* buffer template for the rx node, from the device's NUMA-local pool */
  buffer_pool_index =
    vlib_buffer_pool_get_default_for_numa (vm, dev->numa_node);
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);

  rxq->buffer_template = bp->buffer_template;
  vnet_buffer (&rxq->buffer_template)->sw_if_index[VLIB_TX] = ~0;

  rxq->next_index = vnet_dev_default_next_index_by_port_type[port->attr.type];

  if (port->rx_queue_ops.alloc)
    rv = port->rx_queue_ops.alloc (vm, rxq);

  if (rv != VNET_DEV_OK)
    {
      log_err (dev, "driver rejected rx queue add with rv %d", rv);
      vnet_dev_rx_queue_free (vm, rxq);
    }
  else
    log_debug (dev, "queue %u added, assigned to thread %u", rxq->queue_id,
	       rxq->rx_thread_index);

  return rv;
}
vnet_dev_rv_t
vnet_dev_rx_queue_start (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  /* Invoke the driver start hook (if any); mark the queue started on
   * success and propagate the driver's return value. */
  vnet_dev_port_t *port = rxq->port;
  vnet_dev_rv_t rv = VNET_DEV_OK;

  if (port->rx_queue_ops.start)
    rv = port->rx_queue_ops.start (vm, rxq);

  if (rv == VNET_DEV_OK)
    rxq->started = 1;

  return rv;
}
void
vnet_dev_rx_queue_stop (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  /* Invoke the driver stop hook, disable the port's rx node and clear
   * the started flag. */
  vnet_dev_port_t *port = rxq->port;

  if (port->rx_queue_ops.stop)
    port->rx_queue_ops.stop (vm, rxq);

  vlib_node_set_state (vm, port->intf.rx_node_index,
		       VLIB_NODE_STATE_DISABLED);
  rxq->started = 0;
}
void
vnet_dev_tx_queue_free (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  /* Release a tx queue: driver free hook, assigned-thread bitmap,
   * counters, pool slot and the queue memory itself. */
  vnet_dev_port_t *port = txq->port;

  vnet_dev_port_validate (vm, port);

  log_debug (port->dev, "queue %u", txq->queue_id);

  if (port->tx_queue_ops.free)
    port->tx_queue_ops.free (vm, txq);

  clib_bitmap_free (txq->assigned_threads);
  vnet_dev_tx_queue_free_counters (vm, txq);
  pool_put_index (port->tx_queues, txq->index);
  clib_mem_free (txq);
}
vnet_dev_rv_t
vnet_dev_tx_queue_alloc (vlib_main_t *vm, vnet_dev_port_t *port,
			 u16 queue_size)
{
  /* Allocate and initialize a tx queue on `port` and call the driver
   * alloc hook. Returns VNET_DEV_OK on success. */
  vnet_dev_tx_queue_t *txq, **qp;
  vnet_dev_t *dev = port->dev;
  vnet_dev_rv_t rv = VNET_DEV_OK;

  log_debug (dev, "port %u size %u", port->port_id, queue_size);

  if (pool_elts (port->tx_queues) == port->attr.max_tx_queues)
    return VNET_DEV_ERR_NO_AVAIL_QUEUES;

  /* fix: was sizeof (vnet_dev_port_t) — allocate the tx queue struct,
   * not the port struct */
  txq = vnet_dev_alloc_with_data (sizeof (vnet_dev_tx_queue_t),
				  port->tx_queue_config.data_size);
  pool_get (port->tx_queues, qp);
  qp[0] = txq;
  txq->enabled = 1;
  txq->port = port;
  txq->size = queue_size;
  txq->index = qp - port->tx_queues;

  /* default queue id - can be changed by driver */
  txq->queue_id = qp - port->tx_queues;
  ASSERT (txq->queue_id < port->attr.max_tx_queues);

  if (port->tx_queue_ops.alloc)
    rv = port->tx_queue_ops.alloc (vm, txq);

  if (rv != VNET_DEV_OK)
    {
      log_err (dev, "driver rejected tx queue alloc with rv %d", rv);
      vnet_dev_tx_queue_free (vm, txq);
    }
  else
    log_debug (dev, "queue %u added", txq->queue_id);

  return rv;
}
vnet_dev_rv_t
vnet_dev_tx_queue_start (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  /* Invoke the driver start hook (if any); mark the queue started on
   * success and propagate the driver's return value. */
  vnet_dev_port_t *port = txq->port;
  vnet_dev_rv_t rv = VNET_DEV_OK;

  if (port->tx_queue_ops.start)
    rv = port->tx_queue_ops.start (vm, txq);

  if (rv == VNET_DEV_OK)
    txq->started = 1;

  return rv;
}
void
vnet_dev_tx_queue_stop (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  /* Invoke the driver stop hook (if any) and clear the started flag. */
  vnet_dev_port_t *port = txq->port;

  if (port->tx_queue_ops.stop)
    port->tx_queue_ops.stop (vm, txq);

  txq->started = 0;
}
void
vnet_dev_rx_queue_add_counters (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
				vnet_dev_counter_t *counters, u16 n_counters)
{
  /* Allocate a per-queue counter main named after device/port/queue. */
  vnet_dev_port_t *port = rxq->port;

  rxq->counter_main = vnet_dev_counters_alloc (
    vm, counters, n_counters, "%s port %u rx-queue %u counters",
    port->dev->device_id, port->port_id, rxq->queue_id);
}
void
vnet_dev_rx_queue_free_counters (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  /* No-op when the queue never allocated counters. */
  if (rxq->counter_main == 0)
    return;

  vnet_dev_counters_free (vm, rxq->counter_main);
}
void
vnet_dev_tx_queue_add_counters (vlib_main_t *vm, vnet_dev_tx_queue_t *txq,
				vnet_dev_counter_t *counters, u16 n_counters)
{
  /* Allocate a per-queue counter main named after device/port/queue. */
  vnet_dev_port_t *port = txq->port;

  txq->counter_main = vnet_dev_counters_alloc (
    vm, counters, n_counters, "%s port %u tx-queue %u counters",
    port->dev->device_id, port->port_id, txq->queue_id);
}
void
vnet_dev_tx_queue_free_counters (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  /* No-op when the queue never allocated counters. */
  if (txq->counter_main == 0)
    return;

  log_debug (txq->port->dev, "free");
  vnet_dev_counters_free (vm, txq->counter_main);
}

174
src/vnet/dev/runtime.c Normal file
View File

@ -0,0 +1,174 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
/* fix: use angle-bracket includes for project headers, consistent with
 * the remaining includes in this file (were quote-style includes) */
#include <vppinfra/bitmap.h>
#include <vppinfra/lock.h>
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/log.h>

VLIB_REGISTER_LOG_CLASS (dev_log, static) = {
  .class_name = "dev",
  .subclass_name = "runtime",
};

/* Batch of runtime ops currently published to worker threads; written
 * with release semantics by vnet_dev_rt_exec_ops and read with acquire
 * semantics by the dev-rt-mgmt nodes. Only one batch is in flight at a
 * time (asserted in vnet_dev_rt_exec_ops). */
static vnet_dev_rt_op_t *rt_ops;
/* Apply a single runtime op on the calling thread: attach or detach an
 * rx queue from its port's rx node runtime data, adjusting the node
 * state as the queue count crosses 0/1. */
static void
_vnet_dev_rt_exec_op (vlib_main_t *vm, vnet_dev_rt_op_t *op)
{
  if (op->type == VNET_DEV_RT_OP_TYPE_RX_QUEUE)
    {
      vnet_dev_rx_node_runtime_t *rtd;
      vnet_dev_rx_queue_t *rxq = op->rx_queue;
      u32 i, node_index = rxq->port->intf.rx_node_index;
      rtd = vlib_node_get_runtime_data (vm, node_index);
      if (op->action == VNET_DEV_RT_OP_ACTION_START)
	{
	  /* the queue must not already be attached */
	  for (i = 0; i < rtd->n_rx_queues; i++)
	    ASSERT (rtd->rx_queues[i] != op->rx_queue);
	  rtd->rx_queues[rtd->n_rx_queues++] = op->rx_queue;
	}
      else if (op->action == VNET_DEV_RT_OP_ACTION_STOP)
	{
	  /* locate the queue, then compact the array over it */
	  for (i = 0; i < rtd->n_rx_queues; i++)
	    if (rtd->rx_queues[i] == op->rx_queue)
	      break;
	  ASSERT (i < rtd->n_rx_queues);
	  rtd->n_rx_queues--;
	  for (; i < rtd->n_rx_queues; i++)
	    rtd->rx_queues[i] = rtd->rx_queues[i + 1];
	}
      /* node polls while it has at least one queue, disabled at zero */
      if (rtd->n_rx_queues == 1)
	vlib_node_set_state (vm, node_index, VLIB_NODE_STATE_POLLING);
      else if (rtd->n_rx_queues == 0)
	vlib_node_set_state (vm, node_index, VLIB_NODE_STATE_DISABLED);
      /* release store pairs with the spin-wait in vnet_dev_rt_exec_ops.
       * NOTE(review): `completed` is only set for RX_QUEUE ops; an op of
       * any other type would leave its waiter spinning — confirm that
       * RX_QUEUE is the only type ever enqueued. */
      __atomic_store_n (&op->completed, 1, __ATOMIC_RELEASE);
    }
}
/* Returns 1 when no earlier, still-pending op in [first, current)
 * targets the same rx queue as `current`, i.e. `current` may execute
 * now without violating per-queue ordering. */
static int
_vnet_dev_rt_op_not_occured_before (vnet_dev_rt_op_t *first,
				    vnet_dev_rt_op_t *current)
{
  vnet_dev_rt_op_t *prev = first;

  while (prev < current)
    {
      if (prev->rx_queue == current->rx_queue && prev->completed == 0)
	return 0;
      prev++;
    }

  return 1;
}
/* Interrupt node that applies pending runtime ops on this worker thread.
 * Ops must execute in submission order per rx queue, so an op is
 * deferred (and the node re-armed) while an earlier, still-pending op
 * targets the same queue. Returns the number of ops executed. */
static uword
vnet_dev_rt_mgmt_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
			  vlib_frame_t *frame)
{
  u16 thread_index = vm->thread_index;
  /* acquire pairs with the release publish in vnet_dev_rt_exec_ops */
  vnet_dev_rt_op_t *ops = __atomic_load_n (&rt_ops, __ATOMIC_ACQUIRE);
  vnet_dev_rt_op_t *op;
  int come_back = 0;
  uword rv = 0;
  vec_foreach (op, ops)
    if (op->thread_index == thread_index)
      {
	if (_vnet_dev_rt_op_not_occured_before (ops, op))
	  {
	    _vnet_dev_rt_exec_op (vm, op);
	    rv++;
	  }
	else
	  come_back = 1;
      }
  /* re-arm ourselves to retry ops blocked on per-queue ordering */
  if (come_back)
    vlib_node_set_interrupt_pending (vm, node->node_index);
  return rv;
}
/* Pre-input interrupt node; triggered per-thread by vnet_dev_rt_exec_ops
 * when runtime ops are pending for that thread. */
VLIB_REGISTER_NODE (vnet_dev_rt_mgmt_node, static) = {
  .function = vnet_dev_rt_mgmt_node_fn,
  .name = "dev-rt-mgmt",
  .type = VLIB_NODE_TYPE_PRE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
};
/* format callback: render a runtime op as
 * "port P <type> Q <action> on thread T" */
u8 *
format_vnet_dev_mgmt_op (u8 *s, va_list *args)
{
  vnet_dev_rt_op_t *op = va_arg (*args, vnet_dev_rt_op_t *);
  char *type_names[] = {
    [VNET_DEV_RT_OP_TYPE_RX_QUEUE] = "rx queue",
  };
  char *action_names[] = {
    [VNET_DEV_RT_OP_ACTION_START] = "start",
    [VNET_DEV_RT_OP_ACTION_STOP] = "stop",
  };

  return format (s, "port %u %s %u %s on thread %u",
		 op->rx_queue->port->port_id, type_names[op->type],
		 op->rx_queue->queue_id, action_names[op->action],
		 op->thread_index);
}
/* Execute a batch of runtime ops. Ops targeting the calling thread (or
 * any thread while the worker barrier is held) run synchronously; the
 * rest are published via the global `rt_ops` vector and executed by the
 * dev-rt-mgmt interrupt nodes on their owning threads, with this
 * function spinning until every remote op reports completion. */
vnet_dev_rv_t
vnet_dev_rt_exec_ops (vlib_main_t *vm, vnet_dev_t *dev, vnet_dev_rt_op_t *ops,
		      u32 n_ops)
{
  vnet_dev_rt_op_t *op = ops;
  vnet_dev_rt_op_t *remote_ops = 0;
  clib_bitmap_t *remote_bmp = 0;
  u32 i;
  /* only one batch may be in flight at a time */
  ASSERT (rt_ops == 0);
  for (op = ops; op < (ops + n_ops); op++)
    {
      vlib_main_t *tvm = vlib_get_main_by_index (op->thread_index);
      if ((vlib_worker_thread_barrier_held ()) ||
	  (op->thread_index == vm->thread_index &&
	   _vnet_dev_rt_op_not_occured_before (ops, op)))
	{
	  _vnet_dev_rt_exec_op (tvm, op);
	  log_debug (dev, "%U executed locally", format_vnet_dev_mgmt_op, op);
	  continue;
	}
      /* defer to the op's owning thread */
      vec_add1 (remote_ops, *op);
      log_debug (dev, "%U enqueued for remote execution",
		 format_vnet_dev_mgmt_op, op);
      remote_bmp = clib_bitmap_set (remote_bmp, op->thread_index, 1);
    }
  if (remote_ops == 0)
    return VNET_DEV_OK;
  /* publish; pairs with the acquire load in vnet_dev_rt_mgmt_node_fn */
  __atomic_store_n (&rt_ops, remote_ops, __ATOMIC_RELEASE);
  /* kick the mgmt node on every thread that owns at least one op */
  clib_bitmap_foreach (i, remote_bmp)
    {
      vlib_node_set_interrupt_pending (vlib_get_main_by_index (i),
				       vnet_dev_rt_mgmt_node.index);
      log_debug (dev, "interrupt sent to %s node on thread %u",
		 vnet_dev_rt_mgmt_node.name, i);
    }
  /* spin until each remote op's release store of `completed` is seen */
  vec_foreach (op, remote_ops)
    {
      while (op->completed == 0)
	CLIB_PAUSE ();
    }
  __atomic_store_n (&rt_ops, 0, __ATOMIC_RELAXED);
  vec_free (remote_ops);
  clib_bitmap_free (remote_bmp);
  return VNET_DEV_OK;
}

66
src/vnet/dev/types.h Normal file
View File

@ -0,0 +1,66 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _VNET_DEV_TYPES_H_
#define _VNET_DEV_TYPES_H_

#include <vppinfra/types.h>
#include <vnet/dev/errors.h>

/* fixed-size string types used across the dev infra */
typedef char vnet_dev_device_id_t[32];
typedef char vnet_dev_if_name_t[32];
typedef char vnet_dev_driver_name_t[16];
typedef char vnet_dev_bus_name_t[8];
typedef u16 vnet_dev_port_id_t;

/* forward declarations; full definitions live in dev.h */
typedef struct vnet_dev vnet_dev_t;
typedef struct vnet_dev_port vnet_dev_port_t;
typedef struct vnet_dev_rx_queue vnet_dev_rx_queue_t;
typedef struct vnet_dev_tx_queue vnet_dev_tx_queue_t;

/* positive counterparts of the error codes, generated first so the real
 * codes below can be defined as their negation */
typedef enum
{
  VNET_DEV_MINUS_OK = 0,
#define _(n, d) VNET_DEV_ERR_MINUS_##n,
  foreach_vnet_dev_rv_type
#undef _
} vnet_dev_minus_rv_t;

/* return values: VNET_DEV_OK (0) on success, negative error codes
 * otherwise */
typedef enum
{
  VNET_DEV_OK = 0,
#define _(n, d) VNET_DEV_ERR_##n = -(VNET_DEV_ERR_MINUS_##n),
  foreach_vnet_dev_rv_type
#undef _
} vnet_dev_rv_t;

/* do not change bit assignments - API dependency */
#define foreach_vnet_dev_flag _ (3, NO_STATS, "don't poll device stats")

/* device-level flags; the union allows access either as enum bits (e)
 * or as the raw u64 (n) */
typedef union
{
  enum
  {
#define _(b, n, d) VNET_DEV_F_##n = 1ull << (b),
    foreach_vnet_dev_flag
#undef _
  } e;
  u64 n;
} vnet_dev_flags_t;

/* do not change bit assignments - API dependency */
#define foreach_vnet_dev_port_flag                                            \
  _ (3, INTERRUPT_MODE, "enable interrupt mode")

/* per-port flags; same layout convention as vnet_dev_flags_t */
typedef union
{
  enum
  {
#define _(b, n, d) VNET_DEV_PORT_F_##n = 1ull << (b),
    foreach_vnet_dev_port_flag
#undef _
  } e;
  u64 n;
} vnet_dev_port_flags_t;

#endif /* _VNET_DEV_TYPES_H_ */

View File

@ -146,6 +146,8 @@ p2p_ethernet_add_del (vlib_main_t * vm, u32 parent_if_index,
vnet_feature_enable_disable ("device-input",
"p2p-ethernet-input",
parent_if_index, 1, 0, 0);
vnet_feature_enable_disable ("port-rx-eth", "p2p-ethernet-input",
parent_if_index, 1, 0, 0);
/* Set promiscuous mode on the l2 interface */
ethernet_set_flags (vnm, parent_if_index,
ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
@ -176,6 +178,9 @@ p2p_ethernet_add_del (vlib_main_t * vm, u32 parent_if_index,
vnet_feature_enable_disable ("device-input",
"p2p-ethernet-input",
parent_if_index, 0, 0, 0);
vnet_feature_enable_disable ("port-rx-eth",
"p2p-ethernet-input",
parent_if_index, 0, 0, 0);
/* Disable promiscuous mode on the l2 interface */
ethernet_set_flags (vnm, parent_if_index, 0);
}

View File

@ -244,6 +244,8 @@ interface_handoff_enable_disable (vlib_main_t *vm, u32 sw_if_index,
vnet_feature_enable_disable ("device-input", "worker-handoff",
sw_if_index, enable_disable, 0, 0);
vnet_feature_enable_disable ("port-rx-eth", "worker-handoff", sw_if_index,
enable_disable, 0, 0);
return rv;
}

Some files were not shown because too many files have changed in this diff Show More