iavf: new driver using new dev infra

Type: feature
Change-Id: I9ae0dbf28b4571a37c568b587b771f90c06f200d
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2023-10-17 16:08:18 +00:00
parent 38c619115b
commit 47447f1f59
18 changed files with 4589 additions and 4 deletions

View File

@ -410,6 +410,11 @@ I: avf
M: Damjan Marion <damarion@cisco.com>
F: src/plugins/avf/
Plugin - IAVF Device driver
I: iavf
M: Damjan Marion <damarion@cisco.com>
F: src/plugins/dev_iavf/
Plugin - Dispatch Trace PCAP
I: dispatch-trace
M: Dave Barach <vpp@barachs.net>

View File

@ -248,6 +248,7 @@ Dest
det
dev
devbind
dev_iavf
df
dhcp
dhcp
@ -465,6 +466,7 @@ ia
iacl
iAcl
iACL
iavf
iBGP
ibverb
IBverbs

View File

@ -0,0 +1,20 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright(c) 2022 Cisco Systems, Inc.
# Register the dev_iavf plugin with the VPP build.
# MULTIARCH_SOURCES lists the data-path nodes that are rebuilt per
# CPU-architecture variant (presumably for SIMD dispatch - see
# add_vpp_plugin for details).
add_vpp_plugin(dev_iavf
  SOURCES
  adminq.c
  counters.c
  format.c
  iavf.c
  port.c
  queue.c
  rx_node.c
  tx_node.c
  virtchnl.c

  MULTIARCH_SOURCES
  rx_node.c
  tx_node.c
)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,128 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/pci.h>
#include <vnet/dev/counters.h>
#include <dev_iavf/iavf.h>
#include <dev_iavf/virtchnl.h>
#include <dev_iavf/virtchnl_funcs.h>
/* Log class used by the log_* macros in this file ("iavf/counters"). */
VLIB_REGISTER_LOG_CLASS (iavf_log, static) = {
  .class_name = "iavf",
  .subclass_name = "counters",
};
/* Per-port counter identifiers. Each id is stored in the counter's
 * user_data when registered (see iavf_port_counters below) and is used
 * by iavf_port_poll_stats to map virtchnl stats onto counters.
 * NOTE(review): the double-I "IIAVF_" prefix matches the _IIAVF_H_
 * guards used elsewhere in this plugin. */
typedef enum
{
  IIAVF_PORT_CTR_RX_BYTES,
  IIAVF_PORT_CTR_TX_BYTES,
  IIAVF_PORT_CTR_RX_PACKETS,
  IIAVF_PORT_CTR_TX_PACKETS,
  IIAVF_PORT_CTR_RX_DROPS,
  IIAVF_PORT_CTR_TX_DROPS,
  IIAVF_PORT_CTR_RX_UCAST,
  IIAVF_PORT_CTR_TX_UCAST,
  IIAVF_PORT_CTR_RX_MCAST,
  IIAVF_PORT_CTR_TX_MCAST,
  IIAVF_PORT_CTR_RX_BCAST,
  IIAVF_PORT_CTR_TX_BCAST,
  IIAVF_PORT_CTR_RX_UNKNOWN_PROTOCOL,
  IIAVF_PORT_CTR_TX_ERRORS,
} iavf_port_counter_id_t;
/* Counter descriptors registered with the dev framework for each port.
 * The first macro argument becomes the counter's user_data. */
vnet_dev_counter_t iavf_port_counters[] = {
  VNET_DEV_CTR_RX_BYTES (IIAVF_PORT_CTR_RX_BYTES),
  VNET_DEV_CTR_RX_PACKETS (IIAVF_PORT_CTR_RX_PACKETS),
  VNET_DEV_CTR_RX_DROPS (IIAVF_PORT_CTR_RX_DROPS),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_RX_UCAST, RX, PACKETS, "unicast"),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_RX_MCAST, RX, PACKETS, "multicast"),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_RX_BCAST, RX, PACKETS, "broadcast"),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_RX_UNKNOWN_PROTOCOL, RX, PACKETS,
		       "unknown protocol"),

  VNET_DEV_CTR_TX_BYTES (IIAVF_PORT_CTR_TX_BYTES),
  VNET_DEV_CTR_TX_PACKETS (IIAVF_PORT_CTR_TX_PACKETS),
  VNET_DEV_CTR_TX_DROPS (IIAVF_PORT_CTR_TX_DROPS),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_TX_UCAST, TX, PACKETS, "unicast"),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_TX_MCAST, TX, PACKETS, "multicast"),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_TX_BCAST, TX, PACKETS, "broadcast"),
  VNET_DEV_CTR_VENDOR (IIAVF_PORT_CTR_TX_ERRORS, TX, PACKETS, "errors"),
};
/* Register the port counter set above with the dev framework.
 * Called once per port during port initialization. */
void
iavf_port_add_counters (vlib_main_t *vm, vnet_dev_port_t *port)
{
  vnet_dev_port_add_counters (vm, port, iavf_port_counters,
			      ARRAY_LEN (iavf_port_counters));
}
/* Fetch VSI ethernet stats from the PF over virtchnl and push them into
 * the port counters registered by iavf_port_add_counters. Silently skips
 * the round if the virtchnl request fails. */
void
iavf_port_poll_stats (vlib_main_t *vm, vnet_dev_port_t *port)
{
  vnet_dev_t *dev = port->dev;
  iavf_port_t *ap = vnet_dev_get_port_data (port);
  virtchnl_queue_select_t qs = { .vsi_id = ap->vsi_id };
  virtchnl_eth_stats_t st;

  if (iavf_vc_op_get_stats (vm, dev, &qs, &st) != VNET_DEV_OK)
    return;

  /* Counter values indexed by the iavf_port_counter_id_t stored in each
   * counter's user_data; aggregate packet counts are the sum of the
   * unicast/broadcast/multicast buckets. */
  u64 val[] = {
    [IIAVF_PORT_CTR_RX_BYTES] = st.rx_bytes,
    [IIAVF_PORT_CTR_TX_BYTES] = st.tx_bytes,
    [IIAVF_PORT_CTR_RX_PACKETS] =
      st.rx_unicast + st.rx_broadcast + st.rx_multicast,
    [IIAVF_PORT_CTR_TX_PACKETS] =
      st.tx_unicast + st.tx_broadcast + st.tx_multicast,
    [IIAVF_PORT_CTR_RX_DROPS] = st.rx_discards,
    [IIAVF_PORT_CTR_TX_DROPS] = st.tx_discards,
    [IIAVF_PORT_CTR_RX_UCAST] = st.rx_unicast,
    [IIAVF_PORT_CTR_TX_UCAST] = st.tx_unicast,
    [IIAVF_PORT_CTR_RX_MCAST] = st.rx_multicast,
    [IIAVF_PORT_CTR_TX_MCAST] = st.tx_multicast,
    [IIAVF_PORT_CTR_RX_BCAST] = st.rx_broadcast,
    [IIAVF_PORT_CTR_TX_BCAST] = st.tx_broadcast,
    [IIAVF_PORT_CTR_RX_UNKNOWN_PROTOCOL] = st.rx_unknown_protocol,
    [IIAVF_PORT_CTR_TX_ERRORS] = st.tx_errors,
  };

  foreach_vnet_dev_counter (c, port->counter_main)
    {
      /* unknown id means the counter table and enum went out of sync */
      ASSERT (c->user_data < ARRAY_LEN (val));
      vnet_dev_counter_value_update (vm, c, val[c->user_data]);
    }
}

View File

@ -0,0 +1,133 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/pci.h>
#include <vnet/dev/counters.h>
#include <dev_iavf/iavf.h>
#include <dev_iavf/virtchnl.h>
/* Format a VF capability bitmask as a space-separated list of flag
 * names; bits with no known name print as "unknown(<bit>)". */
u8 *
format_iavf_vf_cap_flags (u8 *s, va_list *args)
{
  u32 flags = va_arg (*args, u32);
  const char *flag_names[32] = {
#define _(a, b, c) [a] = c,
    foreach_iavf_vf_cap_flag
#undef _
  };
  const char *sep = "";

  for (u32 bit = 0; bit < 32; bit++)
    {
      if ((flags & (1 << bit)) == 0)
	continue;
      if (flag_names[bit])
	s = format (s, "%s%s", sep, flag_names[bit]);
      else
	s = format (s, "%sunknown(%u)", sep, bit);
      sep = " ";
    }
  return s;
}
/* Format qword 1 of an rx descriptor: fixed fields (length, ptype,
 * ubmcast, fltstat) followed by the name of every set flag bit. */
u8 *
format_iavf_rx_desc_qw1 (u8 *s, va_list *args)
{
  iavf_rx_desc_qw1_t *qw1 = va_arg (*args, iavf_rx_desc_qw1_t *);
  s = format (s, "len %u ptype %u ubmcast %u fltstat %u flags", qw1->length,
	      qw1->ptype, qw1->ubmcast, qw1->fltstat);
/* append " <name>" for each single-bit flag that is set */
#define _(f)                                                                  \
  if (qw1->f)                                                                 \
  s = format (s, " " #f)
  _ (dd);
  _ (eop);
  _ (l2tag1p);
  _ (l3l4p);
  _ (crcp);
  _ (flm);
  _ (lpbk);
  _ (ipv6exadd);
  _ (int_udp_0);
  _ (ipe);
  _ (l4e);
  _ (oversize);
#undef _
  return s;
}
/* Format an rx-node packet trace entry: interface, queue, next node and
 * flow id, then one line per descriptor in the chain.
 * NOTE(review): the "avf:" prefix (not "iavf:") appears carried over
 * from the original avf plugin trace format - confirm intentional. */
u8 *
format_iavf_rx_trace (u8 *s, va_list *args)
{
  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
  vlib_node_t *node = va_arg (*args, vlib_node_t *);
  iavf_rx_trace_t *t = va_arg (*args, iavf_rx_trace_t *);
  iavf_rx_desc_qw1_t *qw1;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, t->hw_if_index);
  u32 indent = format_get_indent (s);
  int i = 0;

  s = format (s, "avf: %v (%d) qid %u next-node %U flow-id %u", hi->name,
	      t->hw_if_index, t->qid, format_vlib_next_node_name, vm,
	      node->index, t->next_index, t->flow_id);

  /* walk the captured qword-1 values; a chain ends at the first
   * descriptor with the EOP bit set, capped at the max chain length */
  qw1 = (iavf_rx_desc_qw1_t *) t->qw1s;
  do
    s = format (s, "\n%Udesc %u: %U", format_white_space, indent + 2, i,
		format_iavf_rx_desc_qw1, qw1 + i);
  while ((qw1[i++].eop) == 0 && i < IAVF_RX_MAX_DESC_IN_CHAIN);
  return s;
}
/* Format per-port status for "show device": VF capabilities, VSI/RSS
 * parameters and flow offload state. */
u8 *
format_iavf_port_status (u8 *s, va_list *args)
{
  vnet_dev_format_args_t __clib_unused *fa =
    va_arg (*args, vnet_dev_format_args_t *);
  vnet_dev_port_t *port = va_arg (*args, vnet_dev_port_t *);
  iavf_port_t *p = vnet_dev_get_port_data (port);
  u32 indent = format_get_indent (s);

  s = format (s, "caps: %U", format_iavf_vf_cap_flags, p->vf_cap_flags);

  s = format (s, "\n%Uvsi is %u, RSS key size is %u, RSS lut size is %u",
	      format_white_space, indent, p->vsi_id, p->rss_key_size,
	      p->rss_lut_size);

  s = format (s, "\n%Uflow offload ", format_white_space, indent);
  if (!p->flow_offload)
    s = format (s, "disabled");
  else
    s = format (s, "enabled, %u flows configured",
		vec_len (p->flow_lookup_entries));
  return s;
}
/* Log-message prefix: "<pci-addr> <function>: " with the redundant
 * "iavf_" prefix stripped from function names. Either part may be
 * omitted by passing a null pointer. */
u8 *
format_iavf_log (u8 *s, va_list *args)
{
  vnet_dev_t *dev = va_arg (*args, vnet_dev_t *);
  char *func_name = va_arg (*args, char *);

  if (dev)
    s = format (s, "%U", format_vnet_dev_addr, dev);

  if (func_name)
    {
      if (dev)
	vec_add1 (s, ' ');
      if (strncmp (func_name, "iavf_", 5) == 0)
	func_name += 5;
      s = format (s, "%s", func_name);
    }

  s = format (s, ": ");
  return s;
}

271
src/plugins/dev_iavf/iavf.c Normal file
View File

@ -0,0 +1,271 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/pci.h>
#include <vnet/dev/counters.h>
#include <vppinfra/ring.h>
#include <dev_iavf/iavf.h>
#include <dev_iavf/virtchnl.h>
#include <dev_iavf/virtchnl_funcs.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
/* Log class used by the log_* macros in this file ("iavf/init"). */
VLIB_REGISTER_LOG_CLASS (iavf_log, static) = {
  .class_name = "iavf",
  .subclass_name = "init",
};
/* Capability flags this driver requests from the PF in
 * VIRTCHNL_OP_GET_VF_RESOURCES (the /\*\*\/ comments keep one flag per
 * line through clang-format). */
static const u32 driver_cap_flags =
  /**/ VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
  /**/ VIRTCHNL_VF_LARGE_NUM_QPAIRS |
  /**/ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
  /**/ VIRTCHNL_VF_OFFLOAD_FDIR_PF |
  /**/ VIRTCHNL_VF_OFFLOAD_L2 |
  /**/ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
  /**/ VIRTCHNL_VF_OFFLOAD_RSS_PF |
  /**/ VIRTCHNL_VF_OFFLOAD_RX_POLLING |
  /**/ VIRTCHNL_VF_OFFLOAD_VLAN |
  /**/ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
  /**/ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
  /**/ 0;

/* virtchnl protocol version this driver speaks; iavf_init requires an
 * exact major/minor match from the PF. */
static const virtchnl_version_info_t driver_virtchnl_version = {
  .major = VIRTCHNL_VERSION_MAJOR,
  .minor = VIRTCHNL_VERSION_MINOR,
};
/* Expand the per-node counter lists (declared in iavf.h) into vlib
 * error descriptor tables. */
#define _(f, n, s, d)                                                         \
  { .name = #n, .desc = d, .severity = VL_COUNTER_SEVERITY_##s },
vlib_error_desc_t iavf_rx_node_counters[] = { foreach_iavf_rx_node_counter };
vlib_error_desc_t iavf_tx_node_counters[] = { foreach_iavf_tx_node_counter };
#undef _

/* Node descriptions passed to vnet_dev_port_add (see iavf_init);
 * rx trace formatting lives in format.c. */
vnet_dev_node_t iavf_rx_node = {
  .error_counters = iavf_rx_node_counters,
  .n_error_counters = ARRAY_LEN (iavf_rx_node_counters),
  .format_trace = format_iavf_rx_trace,
};

vnet_dev_node_t iavf_tx_node = {
  .error_counters = iavf_tx_node_counters,
  .n_error_counters = ARRAY_LEN (iavf_tx_node_counters),
};
/* PCI device ids (vendor 0x8086) this driver binds to, with the
 * human-readable description returned by iavf_probe. */
static struct
{
  u16 device_id;
  char *desc;
} iavf_dev_types[] = {
  { 0x1889, "Intel(R) Adaptive Virtual Function" },
  { 0x154c, "Intel(R) X710 Virtual Function" },
  { 0x37cd, "Intel(R) X722 Virtual Function" },
};
/* Probe callback: return a freshly formatted description if the PCI
 * vendor/device id pair is one we support, 0 otherwise. */
static u8 *
iavf_probe (vlib_main_t *vm, vnet_dev_bus_index_t bus_index, void *dev_info)
{
  vnet_dev_bus_pci_device_info_t *pci_info = dev_info;

  /* all supported devices are Intel */
  if (pci_info->vendor_id != 0x8086)
    return 0;

  for (u32 i = 0; i < ARRAY_LEN (iavf_dev_types); i++)
    if (iavf_dev_types[i].device_id == pci_info->device_id)
      return format (0, "%s", iavf_dev_types[i].desc);

  return 0;
}
/* Reset the VF and wait for the device to report reset completion,
 * then bring the admin queue back up. Polls every 20 ms for up to 50
 * tries (~1 second) before giving up.
 * NOTE(review): the '(VFGEN_RSTAT & 3) == 2' test presumably matches
 * the virtchnl "VFR completed" state - confirm against virtchnl.h. */
static vnet_dev_rv_t
iavf_reset (vlib_main_t *vm, vnet_dev_t *dev)
{
  iavf_device_t *ad = vnet_dev_get_data (dev);
  u32 n_tries = 50;

  /* admin queue must be usable to send the reset request */
  iavf_aq_init (vm, dev);
  iavf_vc_op_reset_vf (vm, dev);

  do
    {
      if (n_tries-- == 0)
	return VNET_DEV_ERR_TIMEOUT;
      vlib_process_suspend (vm, 0.02);
    }
  while ((iavf_reg_read (ad, VFGEN_RSTAT) & 3) != 2);

  /* reset wiped the admin queue - reinitialize and start polling it */
  iavf_aq_init (vm, dev);
  iavf_aq_poll_on (vm, dev);
  return (VNET_DEV_OK);
}
/* Device alloc callback: allocate admin queue DMA memory. */
static vnet_dev_rv_t
iavf_alloc (vlib_main_t *vm, vnet_dev_t *dev)
{
  log_debug (dev, "alloc");
  return iavf_aq_alloc (vm, dev);
}
/* Device init callback: map BAR0, reset the VF, negotiate the virtchnl
 * version, fetch VF resources and register a single ethernet port with
 * the dev framework. Returns a vnet_dev_rv_t error on any failure. */
static vnet_dev_rv_t
iavf_init (vlib_main_t *vm, vnet_dev_t *dev)
{
  iavf_device_t *ad = vnet_dev_get_data (dev);
  virtchnl_version_info_t ver;
  virtchnl_vf_resource_t res;
  vnet_dev_rv_t rv;

  log_debug (dev, "init");

  /* map registers and enable DMA before touching the device */
  if ((rv = vnet_dev_pci_map_region (vm, dev, 0, &ad->bar0)))
    return rv;

  if ((rv = vnet_dev_pci_bus_master_enable (vm, dev)))
    return rv;

  if ((rv = iavf_reset (vm, dev)))
    return rv;

  /* exact virtchnl version match is required */
  if ((rv = iavf_vc_op_version (vm, dev, &driver_virtchnl_version, &ver)))
    return rv;

  if (ver.major != driver_virtchnl_version.major ||
      ver.minor != driver_virtchnl_version.minor)
    return VNET_DEV_ERR_UNSUPPORTED_DEVICE_VER;

  if ((rv = iavf_vc_op_get_vf_resources (vm, dev, &driver_cap_flags, &res)))
    return rv;

  /* this driver only handles a single SR-IOV VSI */
  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
    return VNET_DEV_ERR_UNSUPPORTED_DEVICE;

  /* initial per-port private data, copied by vnet_dev_port_add */
  iavf_port_t iavf_port = {
    .vf_cap_flags = res.vf_cap_flags,
    .rss_key_size = res.rss_key_size,
    .rss_lut_size = res.rss_lut_size,
    .max_vectors = res.max_vectors,
    .vsi_id = res.vsi_res[0].vsi_id,
    .num_qp = res.vsi_res[0].num_queue_pairs,
  };

  vnet_dev_port_add_args_t port_add_args = {
    .port = {
      .attr = {
        .type = VNET_DEV_PORT_TYPE_ETHERNET,
        .max_rx_queues = res.num_queue_pairs,
        .max_tx_queues = res.num_queue_pairs,
        .max_supported_frame_size = res.max_mtu,
      },
      .ops = {
        .init = iavf_port_init,
        .start = iavf_port_start,
        .stop = iavf_port_stop,
        .config_change = iavf_port_cfg_change,
        .format_status = format_iavf_port_status,
      },
      .data_size = sizeof (iavf_port_t),
      .initial_data = &iavf_port,
    },
    .rx_node = &iavf_rx_node,
    .tx_node = &iavf_tx_node,
    .rx_queue = {
      .config = {
        .data_size = sizeof (iavf_rxq_t),
        .default_size = 512,
        .multiplier = 32,
        .min_size = 32,
        .max_size = 4096,
        .size_is_power_of_two = 1,
      },
      .ops = {
        .alloc = iavf_rx_queue_alloc,
        .free = iavf_rx_queue_free,
      },
    },
    .tx_queue = {
      .config = {
        .data_size = sizeof (iavf_txq_t),
        .default_size = 512,
        .multiplier = 32,
        .min_size = 32,
        .max_size = 4096,
        .size_is_power_of_two = 1,
      },
      .ops = {
        .alloc = iavf_tx_queue_alloc,
        .free = iavf_tx_queue_free,
      },
    },
  };

  vnet_dev_set_hw_addr_eth_mac (&port_add_args.port.attr.hw_addr,
				res.vsi_res[0].default_mac_addr);

  log_info (dev, "MAC address is %U", format_ethernet_address,
	    res.vsi_res[0].default_mac_addr);

  /* one MSI-X vector is reserved (presumably for the admin queue), the
   * rest serve rx queue interrupts - one per worker thread is needed */
  if (vlib_get_n_threads () <= vnet_dev_get_pci_n_msix_interrupts (dev) - 1)
    port_add_args.port.attr.caps.interrupt_mode = 1;
  else
    log_notice (dev,
		"number of threads (%u) bigger than number of interrupt lines "
		"(%u), interrupt mode disabled",
		vlib_get_n_threads (), res.max_vectors);

  if (res.vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)
    {
      if (res.rss_key_size < IAVF_MAX_RSS_KEY_SIZE)
	{
	  log_notice (
	    dev, "unsupported RSS config provided by device, RSS disabled");
	}
      else
	{
	  port_add_args.port.attr.caps.rss = 1;
	  if (res.rss_lut_size > IAVF_MAX_RSS_LUT_SIZE)
	    log_notice (dev, "device supports bigger RSS LUT than driver");
	}
    }

  return vnet_dev_port_add (vm, dev, 0, &port_add_args);
}
/* Device deinit callback: stop admin queue polling and tear it down. */
static void
iavf_deinit (vlib_main_t *vm, vnet_dev_t *dev)
{
  log_debug (dev, "deinit");
  iavf_aq_poll_off (vm, dev);
  iavf_aq_deinit (vm, dev);
  iavf_aq_free (vm, dev);
}

/* Device free callback: release admin queue memory.
 * NOTE(review): iavf_aq_free is also called from iavf_deinit -
 * presumably it is safe to call twice; confirm in adminq.c. */
static void
iavf_free (vlib_main_t *vm, vnet_dev_t *dev)
{
  log_debug (dev, "free");
  iavf_aq_free (vm, dev);
}
/* Driver registration with the new dev infrastructure.
 * NOTE(review): registration token is "avf" while .name is "iavf" -
 * confirm intentional. */
VNET_DEV_REGISTER_DRIVER (avf) = {
  .name = "iavf",
  .bus = "pci",
  .device_data_sz = sizeof (iavf_device_t),
  .runtime_temp_space_sz = sizeof (iavf_rt_data_t),
  .ops = {
    .alloc = iavf_alloc,
    .init = iavf_init,
    .deinit = iavf_deinit,
    .free = iavf_free,
    .probe = iavf_probe,
  },
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "dev_iavf",
};

215
src/plugins/dev_iavf/iavf.h Normal file
View File

@ -0,0 +1,215 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _IIAVF_H_
#define _IIAVF_H_
#include <vppinfra/clib.h>
#include <vppinfra/error_bootstrap.h>
#include <vppinfra/format.h>
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <dev_iavf/iavf_desc.h>
#include <dev_iavf/virtchnl.h>
/* interrupt throttling interval programmed into the device */
#define IAVF_ITR_INT		  250
/* max number of chained rx descriptors forming one packet */
#define IAVF_RX_MAX_DESC_IN_CHAIN 5
/* largest RSS key/LUT sizes this driver can program */
#define IAVF_MAX_RSS_KEY_SIZE	  52
#define IAVF_MAX_RSS_LUT_SIZE	  64
/* admin queue poll period (seconds) and message buffer size */
#define IIAVF_AQ_POLL_INTERVAL	  0.2
#define IIAVF_AQ_BUF_SIZE	  4096
/* opaque DMA memory layout, defined in adminq.c */
typedef struct iavf_adminq_dma_mem iavf_adminq_dma_mem_t;

/* Per-device private data (stored in vnet_dev_t). */
typedef struct
{
  u8 adminq_active : 1;	    /* set while admin queue polling is active */
  void *bar0;		    /* mapped PCI BAR0 registers */

  /* Admin queues */
  iavf_adminq_dma_mem_t *aq_mem; /* send/receive queue DMA memory */
  u16 atq_next_slot;		 /* next free send-queue descriptor */
  u16 arq_next_slot;		 /* next receive-queue descriptor to read */
  virtchnl_pf_event_t *events;	 /* vector of pending PF events */
} iavf_device_t;
/* Flow-director lookup result consumed by the rx node. */
typedef struct
{
  u32 flow_id;
  u16 next_index;
  i16 buffer_advance;
} iavf_flow_lookup_entry_t;

/* Per-port private data (stored in vnet_dev_port_t), populated from
 * the VIRTCHNL_OP_GET_VF_RESOURCES reply in iavf_init. */
typedef struct
{
  u8 admin_up : 1;	/* port administratively up */
  u8 flow_offload : 1;	/* flow offload enabled */
  iavf_flow_lookup_entry_t *flow_lookup_entries; /* pool/vector of flows */
  u32 vf_cap_flags;	/* negotiated VIRTCHNL_VF_* capability bits */
  u16 vsi_id;
  u16 rss_key_size;
  u16 rss_lut_size;
  u16 num_qp;		/* number of queue pairs on the VSI */
  u16 max_vectors;	/* MSI-X vectors available to this VF */
} iavf_port_t;
/* Per-tx-queue private data. */
typedef struct
{
  u32 *qtx_tail;	  /* pointer to tail register in BAR0 */
  u32 *buffer_indices;	  /* buffer index per ring slot */
  iavf_tx_desc_t *descs;  /* descriptor ring (DMA memory) */
  u16 next;		  /* next slot to use */
  u16 n_enqueued;	  /* descriptors currently owned by the device */
  u16 *rs_slots;	  /* ring of slots with the RS bit set */
  iavf_tx_desc_t *tmp_descs; /* scratch space used by the tx node */
  u32 *tmp_bufs;	     /* scratch buffer indices */
  u32 *ph_bufs;		     /* placeholder buffers - see tx_node.c */
} iavf_txq_t;

/* Per-rx-queue private data. */
typedef struct
{
  u32 *qrx_tail;	  /* pointer to tail register in BAR0 */
  u32 *buffer_indices;	  /* buffer index per ring slot */
  iavf_rx_desc_t *descs;  /* descriptor ring (DMA memory) */
  u16 next;		  /* next slot the rx node will look at */
  u16 n_enqueued;	  /* buffers currently owned by the device */
} iavf_rxq_t;

/* Rx packet trace record, formatted by format_iavf_rx_trace. */
typedef struct
{
  u16 qid;
  u16 next_index;
  u32 hw_if_index;
  u32 flow_id;
  u64 qw1s[IAVF_RX_MAX_DESC_IN_CHAIN]; /* qword 1 of each chained desc */
} iavf_rx_trace_t;
/* adminq.c */
vnet_dev_rv_t iavf_aq_alloc (vlib_main_t *, vnet_dev_t *);
void iavf_aq_init (vlib_main_t *, vnet_dev_t *);
void iavf_aq_poll_on (vlib_main_t *, vnet_dev_t *);
void iavf_aq_poll_off (vlib_main_t *, vnet_dev_t *);
void iavf_aq_deinit (vlib_main_t *, vnet_dev_t *);
void iavf_aq_free (vlib_main_t *, vnet_dev_t *);
vnet_dev_rv_t iavf_aq_atq_enq (vlib_main_t *, vnet_dev_t *, iavf_aq_desc_t *,
			       const u8 *, u16, f64);
int iavf_aq_arq_next_acq (vlib_main_t *, vnet_dev_t *, iavf_aq_desc_t **,
			  u8 **, f64);
void iavf_aq_arq_next_rel (vlib_main_t *, vnet_dev_t *);
format_function_t format_virtchnl_op_name;
format_function_t format_virtchnl_status;

/* format.c */
format_function_t format_iavf_vf_cap_flags;
format_function_t format_iavf_rx_trace;
format_function_t format_iavf_port_status;
format_function_t format_iavf_log;

/* port.c */
vnet_dev_rv_t iavf_port_init (vlib_main_t *, vnet_dev_port_t *);
vnet_dev_rv_t iavf_port_start (vlib_main_t *, vnet_dev_port_t *);
void iavf_port_stop (vlib_main_t *, vnet_dev_port_t *);
vnet_dev_rv_t iavf_port_cfg_change (vlib_main_t *, vnet_dev_port_t *,
				    vnet_dev_port_cfg_change_req_t *);

/* queue.c */
vnet_dev_rv_t iavf_rx_queue_alloc (vlib_main_t *, vnet_dev_rx_queue_t *);
vnet_dev_rv_t iavf_tx_queue_alloc (vlib_main_t *, vnet_dev_tx_queue_t *);
vnet_dev_rv_t iavf_rx_queue_start (vlib_main_t *, vnet_dev_rx_queue_t *);
vnet_dev_rv_t iavf_tx_queue_start (vlib_main_t *, vnet_dev_tx_queue_t *);
void iavf_rx_queue_stop (vlib_main_t *, vnet_dev_rx_queue_t *);
void iavf_tx_queue_stop (vlib_main_t *, vnet_dev_tx_queue_t *);
void iavf_rx_queue_free (vlib_main_t *, vnet_dev_rx_queue_t *);
void iavf_tx_queue_free (vlib_main_t *, vnet_dev_tx_queue_t *);

/* counters.c */
void iavf_port_poll_stats (vlib_main_t *, vnet_dev_port_t *);
void iavf_port_add_counters (vlib_main_t *, vnet_dev_port_t *);
/* inline funcs */

/* Read an unaligned little-endian u32 at byte 'offset' from 'start'. */
static inline u32
iavf_get_u32 (void *start, int offset)
{
  return *(u32 *) (((u8 *) start) + offset);
}

/* Write a 32-bit device register at byte offset 'addr' into BAR0;
 * release ordering so prior descriptor writes are visible first. */
static inline void
iavf_reg_write (iavf_device_t *ad, u32 addr, u32 val)
{
  __atomic_store_n ((u32 *) ((u8 *) ad->bar0 + addr), val, __ATOMIC_RELEASE);
}
/* Read a 32-bit device register at byte offset 'addr' into BAR0.
 * Fixed: removed a stray empty statement after the return and cast
 * bar0 to u8 * before the offset arithmetic (void * arithmetic is a
 * GNU extension; this also matches iavf_reg_write). */
static inline u32
iavf_reg_read (iavf_device_t *ad, u32 addr)
{
  return __atomic_load_n ((u32 *) ((u8 *) ad->bar0 + addr),
			  __ATOMIC_RELAXED);
}
/* Flush posted register writes by reading VFGEN_RSTAT; the empty asm
 * is a compiler barrier preventing the read from being reordered. */
static inline void
iavf_reg_flush (iavf_device_t *ad)
{
  iavf_reg_read (ad, VFGEN_RSTAT);
  asm volatile("" ::: "memory");
}
/* Per-device logging helpers. NOTE(review): log_debug prefixes messages
 * with the calling function name via format_iavf_log, while the other
 * levels only print the device address - presumably intentional. */
#define log_debug(dev, f, ...)                                                \
  vlib_log (VLIB_LOG_LEVEL_DEBUG, iavf_log.class, "%U" f, format_iavf_log,    \
	    (dev), __func__, ##__VA_ARGS__)
#define log_info(dev, f, ...)                                                 \
  vlib_log (VLIB_LOG_LEVEL_INFO, iavf_log.class, "%U: " f,                    \
	    format_vnet_dev_addr, (dev), ##__VA_ARGS__)
#define log_notice(dev, f, ...)                                               \
  vlib_log (VLIB_LOG_LEVEL_NOTICE, iavf_log.class, "%U: " f,                  \
	    format_vnet_dev_addr, (dev), ##__VA_ARGS__)
#define log_warn(dev, f, ...)                                                 \
  vlib_log (VLIB_LOG_LEVEL_WARNING, iavf_log.class, "%U: " f,                 \
	    format_vnet_dev_addr, (dev), ##__VA_ARGS__)
#define log_err(dev, f, ...)                                                  \
  vlib_log (VLIB_LOG_LEVEL_ERR, iavf_log.class, "%U: " f,                     \
	    format_vnet_dev_addr, (dev), ##__VA_ARGS__)
/* temp */
#define IAVF_RX_VECTOR_SZ VLIB_FRAME_SIZE

/* Tail portion of a multi-descriptor packet chain (first descriptor is
 * tracked separately, hence the -1). */
typedef struct
{
  u64 qw1s[IAVF_RX_MAX_DESC_IN_CHAIN - 1];
  u32 buffers[IAVF_RX_MAX_DESC_IN_CHAIN - 1];
} iavf_rx_tail_t;

/* Per-thread runtime scratch space for the rx node, allocated by the
 * dev framework (see .runtime_temp_space_sz in the driver
 * registration). */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *bufs[IAVF_RX_VECTOR_SZ];
  u16 next[IAVF_RX_VECTOR_SZ];
  u64 qw1s[IAVF_RX_VECTOR_SZ];
  u32 flow_ids[IAVF_RX_VECTOR_SZ];
  iavf_rx_tail_t tails[IAVF_RX_VECTOR_SZ];
} iavf_rt_data_t;
/* Error counters for the tx node: _(symbol, name, severity, desc). */
#define foreach_iavf_tx_node_counter                                          \
  _ (SEG_SZ_EXCEEDED, seg_sz_exceeded, ERROR, "segment size exceeded")        \
  _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots")

typedef enum
{
#define _(f, n, s, d) IAVF_TX_NODE_CTR_##f,
  foreach_iavf_tx_node_counter
#undef _
} iavf_tx_node_counter_t;

/* Error counters for the rx node. */
#define foreach_iavf_rx_node_counter                                          \
  _ (BUFFER_ALLOC, buffer_alloc, ERROR, "buffer alloc error")

typedef enum
{
#define _(f, n, s, d) IAVF_RX_NODE_CTR_##f,
  foreach_iavf_rx_node_counter
#undef _
} iavf_rx_node_counter_t;

#endif /* _IIAVF_H_ */

View File

@ -0,0 +1,125 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _IIAVF_DESC_H_
#define _IIAVF_DESC_H_
#include <vppinfra/clib.h>
#include <vppinfra/error_bootstrap.h>
#include <vppinfra/format.h>
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <dev_iavf/virtchnl.h>
/* NOTE(review): also defined in iavf.h with the same value - the
 * identical redefinition is legal but one copy could be dropped. */
#define IAVF_RX_MAX_DESC_IN_CHAIN 5

/* Tx descriptor command bits live at bit 4 and up of qword 1. */
#define IAVF_TXD_CMD(x)		  (1 << (x + 4))
#define IAVF_TXD_CMD_EXT(x, val)  ((u64) val << (x + 4))

#define IAVF_TXD_CMD_EOP	  IAVF_TXD_CMD (0)
#define IAVF_TXD_CMD_RS		  IAVF_TXD_CMD (1)
#define IAVF_TXD_CMD_RSV	  IAVF_TXD_CMD (2)

/* internal IP header type (2-bit field) */
#define IAVF_TXD_CMD_IIPT_NONE	  IAVF_TXD_CMD_EXT (5, 0)
#define IAVF_TXD_CMD_IIPT_IPV6	  IAVF_TXD_CMD_EXT (5, 1)
#define IAVF_TXD_CMD_IIPT_IPV4_NO_CSUM IAVF_TXD_CMD_EXT (5, 2)
#define IAVF_TXD_CMD_IIPT_IPV4	  IAVF_TXD_CMD_EXT (5, 3)

/* L4 type for checksum offload (2-bit field) */
#define IAVF_TXD_CMD_L4T_UNKNOWN  IAVF_TXD_CMD_EXT (8, 0)
#define IAVF_TXD_CMD_L4T_TCP	  IAVF_TXD_CMD_EXT (8, 1)
#define IAVF_TXD_CMD_L4T_SCTP	  IAVF_TXD_CMD_EXT (8, 2)
#define IAVF_TXD_CMD_L4T_UDP	  IAVF_TXD_CMD_EXT (8, 3)

/* header length fields, expressed in units of 'factor' bytes */
#define IAVF_TXD_OFFSET(x, factor, val)                                       \
  (((u64) val / (u64) factor) << (16 + x))
#define IAVF_TXD_OFFSET_MACLEN(val) IAVF_TXD_OFFSET (0, 2, val)
#define IAVF_TXD_OFFSET_IPLEN(val)  IAVF_TXD_OFFSET (7, 4, val)
#define IAVF_TXD_OFFSET_L4LEN(val)  IAVF_TXD_OFFSET (14, 4, val)

/* context descriptor (TSO) fields */
#define IAVF_TXD_DTYP_CTX	  0x1ULL
#define IAVF_TXD_CTX_CMD_TSO	  IAVF_TXD_CMD (0)
#define IAVF_TXD_CTX_SEG(val, x)  (((u64) val) << (30 + x))
#define IAVF_TXD_CTX_SEG_TLEN(val) IAVF_TXD_CTX_SEG (val, 0)
#define IAVF_TXD_CTX_SEG_MSS(val)  IAVF_TXD_CTX_SEG (val, 20)
/* Qword 0 of an rx writeback descriptor: mirroring, VLAN tag and
 * filter status. */
typedef union
{
  struct
  {
    u32 mirr : 13;
    u32 _reserved1 : 3;
    u32 l2tag1 : 16;
    u32 filter_status;
  };
  u64 as_u64;
} iavf_rx_desc_qw0_t;
/* Qword 1 of an rx writeback descriptor: status bits, error bits,
 * packet type and length. */
typedef union
{
  struct
  {
    /* status */
    u64 dd : 1;		/* descriptor done */
    u64 eop : 1;	/* end of packet */
    u64 l2tag1p : 1;
    u64 l3l4p : 1;
    u64 crcp : 1;
    u64 _reserved2 : 4;
    u64 ubmcast : 2;
    u64 flm : 1;
    u64 fltstat : 2;
    u64 lpbk : 1;
    u64 ipv6exadd : 1;
    u64 _reserved3 : 2;
    u64 int_udp_0 : 1;
    /* error */
    u64 _reserved_err0 : 3;
    u64 ipe : 1;
    u64 l4e : 1;
    u64 _reserved_err5 : 1;
    u64 oversize : 1;
    u64 _reserved_err7 : 1;
    u64 rsv2 : 3;
    u64 ptype : 8;
    u64 length : 26;
  };
  u64 as_u64;
} iavf_rx_desc_qw1_t;

STATIC_ASSERT_SIZEOF (iavf_rx_desc_qw0_t, 8);
STATIC_ASSERT_SIZEOF (iavf_rx_desc_qw1_t, 8);

/* 32-byte rx descriptor. In read format only 'addr' is meaningful; in
 * writeback format the qw0/qw1 views apply. */
typedef struct
{
  union
  {
    struct
    {
      iavf_rx_desc_qw0_t qw0;
      /* fixed: qw1 was declared with the iavf_rx_desc_qw0_t type; it
       * carries the qword-1 status/error/length layout (both types are
       * 8 bytes, so the size assert below hid the mismatch) */
      iavf_rx_desc_qw1_t qw1;
      u64 rsv3 : 64;
      u32 flex_lo;
      u32 fdid_flex_hi;
    };
    u64 qword[4];
    u64 addr;
#ifdef CLIB_HAVE_VEC256
    u64x4 as_u64x4;
#endif
  };
} iavf_rx_desc_t;

STATIC_ASSERT_SIZEOF (iavf_rx_desc_t, 32);
/* 16-byte tx descriptor: qword[0] is the buffer address, qword[1]
 * carries the command/offset/length fields built with the IAVF_TXD_*
 * macros above. */
typedef struct
{
  union
  {
    u64 qword[2];
#ifdef CLIB_HAVE_VEC128
    u64x2 as_u64x2;
#endif
  };
} iavf_tx_desc_t;

STATIC_ASSERT_SIZEOF (iavf_tx_desc_t, 16);

#endif /* _IIAVF_DESC_H_ */

File diff suppressed because it is too large Load Diff

442
src/plugins/dev_iavf/port.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,178 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#include <vnet/vnet.h>
#include <vnet/dev/dev.h>
#include <vnet/dev/pci.h>
#include <vnet/dev/counters.h>
#include <vppinfra/ring.h>
#include <dev_iavf/iavf.h>
#include <dev_iavf/virtchnl.h>
#include <dev_iavf/virtchnl_funcs.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
/* Log class used by the log_* macros in this file ("iavf/queue"). */
VLIB_REGISTER_LOG_CLASS (iavf_log, static) = {
  .class_name = "iavf",
  .subclass_name = "queue",
};
/* Rx queue alloc callback: allocate the descriptor ring (DMA memory)
 * and the per-slot buffer index array, and cache a pointer to the
 * queue's tail register in BAR0.
 * Fixed: the DMA allocation (the only fallible step) now happens
 * first, so a failure cannot strand the buffer_indices allocation;
 * this also matches the ordering in iavf_tx_queue_alloc. Also fixed
 * the "alocated" typo in the log message. */
vnet_dev_rv_t
iavf_rx_queue_alloc (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port = rxq->port;
  vnet_dev_t *dev = port->dev;
  iavf_device_t *ad = vnet_dev_get_data (dev);
  iavf_rxq_t *arq = vnet_dev_get_rx_queue_data (rxq);
  vnet_dev_rv_t rv;

  if ((rv =
	 vnet_dev_dma_mem_alloc (vm, dev, sizeof (iavf_rx_desc_t) * rxq->size,
				 0, (void **) &arq->descs)))
    return rv;

  arq->buffer_indices = clib_mem_alloc_aligned (
    rxq->size * sizeof (arq->buffer_indices[0]), CLIB_CACHE_LINE_BYTES);

  arq->qrx_tail = ad->bar0 + IAVF_QRX_TAIL (rxq->queue_id);

  log_debug (dev, "queue %u allocated", rxq->queue_id);
  return VNET_DEV_OK;
}
/* Rx queue free callback: release the descriptor ring and the buffer
 * index array allocated by iavf_rx_queue_alloc. */
void
iavf_rx_queue_free (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_t *dev = rxq->port->dev;
  iavf_rxq_t *arq = vnet_dev_get_rx_queue_data (rxq);

  log_debug (dev, "queue %u", rxq->queue_id);

  vnet_dev_dma_mem_free (vm, dev, arq->descs);

  if (arq->buffer_indices)
    clib_mem_free (arq->buffer_indices);
}
/* Tx queue alloc callback: allocate the descriptor ring (DMA memory),
 * the RS-slot tracking ring, the per-slot buffer index array and the
 * tx-node scratch arrays, and cache a pointer to the queue's tail
 * register in BAR0.
 * Fixed the "alocated" typo in the log message. */
vnet_dev_rv_t
iavf_tx_queue_alloc (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  vnet_dev_t *dev = txq->port->dev;
  iavf_device_t *ad = vnet_dev_get_data (dev);
  iavf_txq_t *atq = vnet_dev_get_tx_queue_data (txq);
  vnet_dev_rv_t rv;

  /* the only fallible allocation goes first */
  if ((rv =
	 vnet_dev_dma_mem_alloc (vm, dev, sizeof (iavf_tx_desc_t) * txq->size,
				 0, (void **) &atq->descs)))
    return rv;

  /* ring of descriptor slots carrying the RS (report status) bit */
  clib_ring_new_aligned (atq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);

  atq->buffer_indices = clib_mem_alloc_aligned (
    txq->size * sizeof (atq->buffer_indices[0]), CLIB_CACHE_LINE_BYTES);
  atq->tmp_descs = clib_mem_alloc_aligned (
    sizeof (atq->tmp_descs[0]) * txq->size, CLIB_CACHE_LINE_BYTES);
  atq->tmp_bufs = clib_mem_alloc_aligned (
    sizeof (atq->tmp_bufs[0]) * txq->size, CLIB_CACHE_LINE_BYTES);

  atq->qtx_tail = ad->bar0 + IAVF_QTX_TAIL (txq->queue_id);

  log_debug (dev, "queue %u allocated", txq->queue_id);
  return VNET_DEV_OK;
}
/* Tx queue free callback: release everything allocated by
 * iavf_tx_queue_alloc.
 * Fixed: the original fetched the same queue data into two variables
 * (atq and aq) and used them interchangeably; collapsed to one. */
void
iavf_tx_queue_free (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  vnet_dev_t *dev = txq->port->dev;
  iavf_txq_t *atq = vnet_dev_get_tx_queue_data (txq);

  log_debug (dev, "queue %u", txq->queue_id);

  vnet_dev_dma_mem_free (vm, dev, atq->descs);
  clib_ring_free (atq->rs_slots);

  foreach_pointer (p, atq->tmp_descs, atq->tmp_bufs, atq->buffer_indices)
    if (p)
      clib_mem_free (p);
}
/* Rx queue start callback: fill the ring with buffers, program their
 * DMA addresses into the descriptors and publish the tail pointer. */
vnet_dev_rv_t
iavf_rx_queue_start (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_t *dev = rxq->port->dev;
  iavf_rxq_t *arq = vnet_dev_get_rx_queue_data (rxq);
  iavf_rx_desc_t *d = arq->descs;
  u32 n_enq, *bi = arq->buffer_indices;
  u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);

  /* fill all but 8 slots - presumably so tail never catches up with
   * head; fewer than 8 buffers is treated as allocation failure */
  n_enq = vlib_buffer_alloc_from_pool (vm, bi, rxq->size - 8, bpi);

  if (n_enq < 8)
    {
      if (n_enq)
	vlib_buffer_free (vm, bi, n_enq);
      return VNET_DEV_ERR_BUFFER_ALLOC_FAIL;
    }

  for (u32 i = 0; i < n_enq; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[i]);
      u64 dma_addr = vnet_dev_get_dma_addr (vm, dev, b->data);
      d[i] = (iavf_rx_desc_t){ .addr = dma_addr };
    }

  arq->n_enqueued = n_enq;
  arq->next = 0;
  /* release store: descriptors must be visible before the tail bump */
  __atomic_store_n (arq->qrx_tail, n_enq, __ATOMIC_RELEASE);
  return VNET_DEV_OK;
}
/* Rx queue stop callback: clear the tail register and return any
 * buffers still owned by the device to the buffer pool. */
void
iavf_rx_queue_stop (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
  iavf_rxq_t *rq = vnet_dev_get_rx_queue_data (rxq);

  __atomic_store_n (rq->qrx_tail, 0, __ATOMIC_RELAXED);

  if (rq->n_enqueued)
    {
      vlib_buffer_free_from_ring_no_next (vm, rq->buffer_indices, rq->next,
					  rxq->size, rq->n_enqueued);
      log_debug (rxq->port->dev, "%u buffers freed from rx queue %u",
		 rq->n_enqueued, rxq->queue_id);
    }

  rq->next = 0;
  rq->n_enqueued = 0;
}
/* Tx queue start callback: reset ring state and the tail register. */
vnet_dev_rv_t
iavf_tx_queue_start (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  iavf_txq_t *atq = vnet_dev_get_tx_queue_data (txq);
  atq->next = 0;
  atq->n_enqueued = 0;
  clib_ring_reset (atq->rs_slots);
  __atomic_store_n (atq->qtx_tail, 0, __ATOMIC_RELAXED);
  return VNET_DEV_OK;
}
/* Tx queue stop callback: clear the tail register and free any
 * buffers whose transmission was never completed. */
void
iavf_tx_queue_stop (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
{
  iavf_txq_t *atq = vnet_dev_get_tx_queue_data (txq);

  log_debug (txq->port->dev, "queue %u", txq->queue_id);

  __atomic_store_n (atq->qtx_tail, 0, __ATOMIC_RELAXED);
  if (atq->n_enqueued)
    {
      /* pending buffers live in the n_enqueued slots before 'next'
       * (the ring-free helper wraps the start index) */
      vlib_buffer_free_from_ring_no_next (vm, atq->buffer_indices,
					  atq->next - atq->n_enqueued,
					  txq->size, atq->n_enqueued);
      log_debug (txq->port->dev, "%u buffers freed from tx queue %u",
		 atq->n_enqueued, txq->queue_id);
    }
  atq->n_enqueued = atq->next = 0;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,241 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2023 Cisco Systems, Inc.
*/
#ifndef _IIAVF_VIRTCHNL_FUNCS_H_
#define _IIAVF_VIRTCHNL_FUNCS_H_
#include <vppinfra/clib.h>
#include <vnet/dev/dev.h>
#include <dev_iavf/iavf.h>
/* Size of a virtchnl message that ends in a flexible array: offset of
 * one-past-the-last element e[n]. */
#define VIRTCHNL_MSG_SZ(s, e, n) STRUCT_OFFSET_OF (s, e[(n) + 1])

/* One virtchnl request/response exchange, executed over the admin
 * queue by iavf_virtchnl_req (virtchnl.c). */
typedef struct
{
  virtchnl_op_t op;	    /* VIRTCHNL_OP_* opcode */
  u8 no_reply : 1;	    /* set when no response is expected */
  u16 req_sz;		    /* request payload size in bytes */
  u16 resp_sz;		    /* expected response size in bytes */
  virtchnl_status_t status; /* status returned by the PF */
  const void *req;	    /* request payload (may be 0) */
  void *resp;		    /* response buffer (may be 0) */
} iavf_virtchnl_req_t;

vnet_dev_rv_t iavf_virtchnl_req (vlib_main_t *, vnet_dev_t *,
				 iavf_virtchnl_req_t *);
/* Typed wrappers around iavf_virtchnl_req - one per virtchnl opcode.
 * Each fills in the opcode plus request/response buffers and sizes. */

/* VIRTCHNL_OP_VERSION: negotiate the virtchnl protocol version. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_version (vlib_main_t *vm, vnet_dev_t *dev,
		    const virtchnl_version_info_t *req,
		    virtchnl_version_info_t *resp)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_VERSION,
    .req = req,
    .req_sz = sizeof (*req),
    .resp = resp,
    .resp_sz = sizeof (*resp),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_RESET_VF: request a VF reset; no response arrives. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_reset_vf (vlib_main_t *vm, vnet_dev_t *dev)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_RESET_VF,
    .no_reply = 1,
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_GET_VF_RESOURCES: request capabilities and receive the
 * VF resource description. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_get_vf_resources (vlib_main_t *vm, vnet_dev_t *dev, const u32 *req,
			     virtchnl_vf_resource_t *resp)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_GET_VF_RESOURCES,
    .req = req,
    .req_sz = sizeof (*req),
    .resp = resp,
    .resp_sz = sizeof (*resp),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_ENABLE_QUEUES: enable the selected rx/tx queues. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_enable_queues (vlib_main_t *vm, vnet_dev_t *dev,
			  const virtchnl_queue_select_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_ENABLE_QUEUES,
    .req = req,
    .req_sz = sizeof (*req),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_DISABLE_QUEUES: disable the selected rx/tx queues. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_disable_queues (vlib_main_t *vm, vnet_dev_t *dev,
			   const virtchnl_queue_select_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_DISABLE_QUEUES,
    .req = req,
    .req_sz = sizeof (*req),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES: program queue pair configuration;
 * message size depends on the number of queue pairs. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_config_vsi_queues (vlib_main_t *vm, vnet_dev_t *dev,
			      const virtchnl_vsi_queue_config_info_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_CONFIG_VSI_QUEUES,
    .req = req,
    .req_sz = VIRTCHNL_MSG_SZ (virtchnl_vsi_queue_config_info_t, qpair,
			       req->num_queue_pairs),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_CONFIG_IRQ_MAP: map queues to interrupt vectors. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_config_irq_map (vlib_main_t *vm, vnet_dev_t *dev,
			   const virtchnl_irq_map_info_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_CONFIG_IRQ_MAP,
    .req = req,
    .req_sz =
      VIRTCHNL_MSG_SZ (virtchnl_irq_map_info_t, vecmap, req->num_vectors),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_CONFIG_RSS_LUT: program the RSS lookup table. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_config_rss_lut (vlib_main_t *vm, vnet_dev_t *dev,
			   const virtchnl_rss_lut_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_CONFIG_RSS_LUT,
    .req = req,
    .req_sz = VIRTCHNL_MSG_SZ (virtchnl_rss_lut_t, lut, req->lut_entries),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_CONFIG_RSS_KEY: program the RSS hash key. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_config_rss_key (vlib_main_t *vm, vnet_dev_t *dev,
			   const virtchnl_rss_key_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_CONFIG_RSS_KEY,
    .req = req,
    .req_sz = VIRTCHNL_MSG_SZ (virtchnl_rss_key_t, key, req->key_len),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: set unicast/multicast
 * promiscuous flags. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_config_promisc_mode (vlib_main_t *vm, vnet_dev_t *dev,
				const virtchnl_promisc_info_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
    .req = req,
    .req_sz = sizeof (*req),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: legacy (v1) VLAN offload. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_disable_vlan_stripping (vlib_main_t *vm, vnet_dev_t *dev)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_ADD_ETH_ADDR: add MAC addresses to the VSI filter. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_add_eth_addr (vlib_main_t *vm, vnet_dev_t *dev,
			 const virtchnl_ether_addr_list_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_ADD_ETH_ADDR,
    .req = req,
    .req_sz =
      VIRTCHNL_MSG_SZ (virtchnl_ether_addr_list_t, list, req->num_elements),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_DEL_ETH_ADDR: remove MAC addresses from the filter. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_del_eth_addr (vlib_main_t *vm, vnet_dev_t *dev,
			 const virtchnl_ether_addr_list_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_DEL_ETH_ADDR,
    .req = req,
    .req_sz =
      VIRTCHNL_MSG_SZ (virtchnl_ether_addr_list_t, list, req->num_elements),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: query v2 VLAN offload caps. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_get_offload_vlan_v2_caps (vlib_main_t *vm, vnet_dev_t *dev,
				     virtchnl_vlan_caps_t *resp)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
    .resp = resp,
    .resp_sz = sizeof (*resp),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}
/* VIRTCHNL_OP_GET_STATS: fetch VSI ethernet statistics (used by
 * iavf_port_poll_stats in counters.c). */
static_always_inline vnet_dev_rv_t
iavf_vc_op_get_stats (vlib_main_t *vm, vnet_dev_t *dev,
		      const virtchnl_queue_select_t *req,
		      virtchnl_eth_stats_t *resp)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_GET_STATS,
    .req = req,
    .req_sz = sizeof (*req),
    .resp = resp,
    .resp_sz = sizeof (*resp),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

/* VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: v2 VLAN offload variant. */
static_always_inline vnet_dev_rv_t
iavf_vc_op_disable_vlan_stripping_v2 (vlib_main_t *vm, vnet_dev_t *dev,
				      const virtchnl_vlan_setting_t *req)
{
  iavf_virtchnl_req_t vr = {
    .op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
    .req = req,
    .req_sz = sizeof (*req),
  };

  return iavf_virtchnl_req (vm, dev, &vr);
}

#endif /* _IIAVF_VIRTCHNL_FUNCS_H_ */

View File

@ -32,20 +32,25 @@ clib_ring_header (void *v)
return vec_header (v);
}
/* Reset a ring to empty without freeing or resizing it. */
always_inline void
clib_ring_reset (void *v)
{
  clib_ring_header_t *h = clib_ring_header (v);
  h->next = 0;
  h->n_enq = 0;
}
/* Allocate a new ring of 'size' elements of 'elt_bytes' each, aligned
 * to 'align', and store the result in *p in empty state. */
always_inline void
clib_ring_new_inline (void **p, u32 elt_bytes, u32 size, u32 align)
{
  void *v;
  clib_ring_header_t *h;
  vec_attr_t va = { .elt_sz = elt_bytes,
		    .hdr_sz = sizeof (clib_ring_header_t),
		    .align = align };

  v = _vec_alloc_internal (size, &va);
  h = clib_ring_header (v);
  clib_ring_reset (v);
  p[0] = v;
}