iavf: interrupt mode support

Type: improvement
Change-Id: Ie5fcaa706ab0995e0021cf1ee74b95c5a3b30283
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2023-11-06 00:05:57 +00:00
committed by Dave Wallace
parent 14bfd3d8b8
commit 67f03ba71d
7 changed files with 241 additions and 86 deletions
+36 -32
View File
@@ -8,6 +8,7 @@
#include <vnet/dev/pci.h>
#include <vnet/dev/counters.h>
#include <dev_iavf/iavf.h>
#include <dev_iavf/iavf_regs.h>
#include <dev_iavf/virtchnl.h>
#include <vnet/ethernet/ethernet.h>
@@ -34,6 +35,36 @@ struct iavf_adminq_dma_mem
} arq_bufs[IIAVF_AQ_ARQ_LEN];
};
/* Precomputed VFINT_DYN_CTL0 values for admin-queue interrupt 0.
 * itr_indx = 3 (11b) means "no ITR update" per the AVF register layout,
 * so these writes only toggle the enable/clear-PBA bits. */
static const iavf_dyn_ctl dyn_ctl0_disable = {
.itr_indx = 3,
};
/* Enable variant: set interrupt-enable and clear the pending-bit array. */
static const iavf_dyn_ctl dyn_ctl0_enable = {
.intena = 1,
.clearpba = 1,
.itr_indx = 3,
};
/* ICR0_ENA1 mask that unmasks only the admin-queue cause bit. */
static const iavf_vfint_icr0_ena1 icr0_ena1_aq_enable = {
.adminq = 1,
};
/* Mask MSI-X vector 0 (admin queue): clear every cause-enable bit in
 * ICR0_ENA1, write DYN_CTL0 with intena = 0, then read back a register
 * (iavf_reg_flush) so the posted writes reach the device. */
static inline void
iavf_irq_0_disable (iavf_device_t *ad)
{
iavf_reg_write (ad, IAVF_VFINT_ICR0_ENA1, 0);
iavf_reg_write (ad, IAVF_VFINT_DYN_CTL0, dyn_ctl0_disable.as_u32);
iavf_reg_flush (ad);
}
/* Unmask MSI-X vector 0: enable only the admin-queue cause in ICR0_ENA1,
 * then arm DYN_CTL0 (intena + clearpba, no ITR update) and flush the
 * posted writes with a read. */
static inline void
iavf_irq_0_enable (iavf_device_t *ad)
{
iavf_reg_write (ad, IAVF_VFINT_ICR0_ENA1, icr0_ena1_aq_enable.as_u32);
iavf_reg_write (ad, IAVF_VFINT_DYN_CTL0, dyn_ctl0_enable.as_u32);
iavf_reg_flush (ad);
}
static_always_inline int
iavf_aq_desc_is_done (iavf_aq_desc_t *d)
{
@@ -242,39 +273,12 @@ iavf_aq_poll (vlib_main_t *vm, vnet_dev_t *dev)
}
}
/* Legacy combined enable/disable for interrupt 0 (replaced in this commit
 * by iavf_irq_0_disable / iavf_irq_0_enable). Always masks first, then
 * optionally re-arms with an ITR interval. */
static inline void
iavf_irq_0_set_state (iavf_device_t *ad, int enable)
{
u32 dyn_ctl0 = 0, icr0_ena = 0;
dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
/* Mask all causes and disable the vector before any reconfiguration. */
iavf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
iavf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
iavf_reg_flush (ad);
if (!enable)
return;
dyn_ctl0 = 0;
icr0_ena = 0;
icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
/* NOTE(review): (2 << 3) sets ITR index = 2 (10b), but the original
 * comment claimed 11b = "No ITR update" (which would be (3 << 3)).
 * Code and comment disagree — confirm which index was intended. */
dyn_ctl0 |= (2 << 3); /* [4:3] ITR Index */
dyn_ctl0 |= ((IAVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
iavf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
iavf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
iavf_reg_flush (ad);
}
static void
iavf_adminq_msix_handler (vlib_main_t *vm, vnet_dev_t *dev, u16 line)
{
log_debug (dev, "MSI-X interrupt 0 received");
iavf_device_t *ad = vnet_dev_get_data (dev);
iavf_reg_write (ad, IAVF_VFINT_DYN_CTL0, dyn_ctl0_enable.as_u32);
log_debug (dev, "MSI-X interrupt %u received", line);
vnet_dev_process_call_op_no_wait (vm, dev, iavf_aq_poll);
}
@@ -335,7 +339,7 @@ iavf_aq_poll_on (vlib_main_t *vm, vnet_dev_t *dev)
else
vnet_dev_pci_intx_add_handler (vm, dev, iavf_adminq_intx_handler);
iavf_irq_0_set_state (ad, 1);
iavf_irq_0_enable (ad);
}
void
@@ -343,7 +347,7 @@ iavf_aq_poll_off (vlib_main_t *vm, vnet_dev_t *dev)
{
iavf_device_t *ad = vnet_dev_get_data (dev);
iavf_irq_0_set_state (ad, 0);
iavf_irq_0_disable (ad);
vnet_dev_poll_dev_remove (vm, dev, iavf_aq_poll);
+15 -7
View File
@@ -98,7 +98,7 @@ iavf_reset (vlib_main_t *vm, vnet_dev_t *dev)
return VNET_DEV_ERR_TIMEOUT;
vlib_process_suspend (vm, 0.02);
}
while ((iavf_reg_read (ad, VFGEN_RSTAT) & 3) != 2);
while ((iavf_reg_read (ad, IAVF_VFGEN_RSTAT) & 3) != 2);
iavf_aq_init (vm, dev);
iavf_aq_poll_on (vm, dev);
@@ -118,6 +118,7 @@ iavf_init (vlib_main_t *vm, vnet_dev_t *dev)
iavf_device_t *ad = vnet_dev_get_data (dev);
virtchnl_version_info_t ver;
virtchnl_vf_resource_t res;
u32 n_threads = vlib_get_n_threads ();
vnet_dev_rv_t rv;
log_debug (dev, "init");
@@ -209,13 +210,20 @@ iavf_init (vlib_main_t *vm, vnet_dev_t *dev)
log_info (dev, "MAC address is %U", format_ethernet_address,
res.vsi_res[0].default_mac_addr);
if (vlib_get_n_threads () <= vnet_dev_get_pci_n_msix_interrupts (dev) - 1)
port_add_args.port.attr.caps.interrupt_mode = 1;
if (n_threads <= vnet_dev_get_pci_n_msix_interrupts (dev) - 1)
{
port_add_args.port.attr.caps.interrupt_mode = 1;
iavf_port.n_rx_vectors = n_threads;
}
else
log_notice (dev,
"number of threads (%u) bigger than number of interrupt lines "
"(%u), interrupt mode disabled",
vlib_get_n_threads (), res.max_vectors);
{
log_notice (
dev,
"number of threads (%u) bigger than number of interrupt lines "
"(%u), interrupt mode disabled",
vlib_get_n_threads (), res.max_vectors);
iavf_port.n_rx_vectors = 1;
}
if (res.vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)
{
+3 -2
View File
@@ -32,7 +32,6 @@ typedef struct
u16 atq_next_slot;
u16 arq_next_slot;
virtchnl_pf_event_t *events;
} iavf_device_t;
typedef struct
@@ -47,12 +46,14 @@ typedef struct
u8 admin_up : 1;
u8 flow_offload : 1;
iavf_flow_lookup_entry_t *flow_lookup_entries;
u64 intr_mode_per_rxq_bitmap;
u32 vf_cap_flags;
u16 vsi_id;
u16 rss_key_size;
u16 rss_lut_size;
u16 num_qp;
u16 max_vectors;
u16 n_rx_vectors;
} iavf_port_t;
typedef struct
@@ -151,7 +152,7 @@ iavf_reg_read (iavf_device_t *ad, u32 addr)
static inline void
iavf_reg_flush (iavf_device_t *ad)
{
iavf_reg_read (ad, VFGEN_RSTAT);
iavf_reg_read (ad, IAVF_VFGEN_RSTAT);
asm volatile("" ::: "memory");
}
+15 -2
View File
@@ -344,8 +344,21 @@ typedef union
u32 intena_msk : 1;
};
u32 as_u32;
} iavf_dyn_ctln;
} iavf_dyn_ctl;
STATIC_ASSERT_SIZEOF (iavf_dyn_ctln, 4);
STATIC_ASSERT_SIZEOF (iavf_dyn_ctl, 4);
/* Bit layout of the VFINT_ICR0_ENA1 register: each set bit unmasks the
 * corresponding interrupt cause on vector 0. Only the admin-queue cause
 * (bit 30) is modeled here; the remaining bits are reserved padding. */
typedef union
{
struct
{
u32 _reserved0 : 30;
u32 adminq : 1;     /* bit 30: admin-queue interrupt cause enable */
u32 _reserved31 : 1;
};
u32 as_u32;         /* raw value for register writes */
} iavf_vfint_icr0_ena1;
STATIC_ASSERT_SIZEOF (iavf_vfint_icr0_ena1, 4);
#endif /* _IIAVF_REGS_H_ */
+125 -26
View File
@@ -24,16 +24,15 @@ static const u8 default_rss_key[] = {
0x55, 0x7d, 0x99, 0x58, 0x3a, 0xe1, 0x38, 0xc9, 0x2e, 0x81, 0x15, 0x03, 0x66,
};
const static iavf_dyn_ctln dyn_ctln_disabled = {};
const static iavf_dyn_ctln dyn_ctln_enabled = {
const static iavf_dyn_ctl dyn_ctln_disabled = {};
const static iavf_dyn_ctl dyn_ctln_enabled = {
.intena = 1,
.clearpba = 1,
.interval = IAVF_ITR_INT / 2,
.intena = 1,
};
const static iavf_dyn_ctln dyn_ctln_wb_on_itr = {
.clearpba = 1,
const static iavf_dyn_ctl dyn_ctln_wb_on_itr = {
.itr_indx = 1,
.interval = 32 / 2,
.interval = 2,
.wb_on_itr = 1,
};
@@ -172,22 +171,30 @@ iavf_port_init_vsi_queues (vlib_main_t *vm, vnet_dev_port_t *port)
}
vnet_dev_rv_t
iavf_port_rx_irq_enable_disable (vlib_main_t *vm, vnet_dev_port_t *port,
int enable)
iavf_port_rx_irq_config (vlib_main_t *vm, vnet_dev_port_t *port, int enable)
{
vnet_dev_t *dev = port->dev;
iavf_device_t *ad = vnet_dev_get_data (dev);
iavf_port_t *ap = vnet_dev_get_port_data (port);
u16 n_threads = vlib_get_n_threads ();
u8 buffer[VIRTCHNL_MSG_SZ (virtchnl_irq_map_info_t, vecmap, n_threads)];
u16 n_rx_vectors = ap->n_rx_vectors;
u8 buffer[VIRTCHNL_MSG_SZ (virtchnl_irq_map_info_t, vecmap, n_rx_vectors)];
u8 n_intr_mode_queues_per_vector[n_rx_vectors];
u8 n_queues_per_vector[n_rx_vectors];
virtchnl_irq_map_info_t *im = (virtchnl_irq_map_info_t *) buffer;
vnet_dev_rv_t rv;
log_debug (dev, "intr mode per queue bitmap 0x%x",
ap->intr_mode_per_rxq_bitmap);
for (u32 i = 0; i < n_rx_vectors; i++)
n_intr_mode_queues_per_vector[i] = n_queues_per_vector[i] = 0;
*im = (virtchnl_irq_map_info_t){
.num_vectors = n_rx_vectors,
};
if (port->attr.caps.interrupt_mode)
{
*im = (virtchnl_irq_map_info_t){
.num_vectors = n_threads,
};
for (u16 i = 0; i < im->num_vectors; i++)
im->vecmap[i] = (virtchnl_vector_map_t){
.vsi_id = ap->vsi_id,
@@ -196,16 +203,19 @@ iavf_port_rx_irq_enable_disable (vlib_main_t *vm, vnet_dev_port_t *port,
if (enable)
foreach_vnet_dev_port_rx_queue (rxq, port)
if (rxq->enabled)
im->vecmap[rxq->rx_thread_index].rxq_map |= 1 << rxq->queue_id;
{
u32 i = rxq->rx_thread_index;
im->vecmap[i].rxq_map |= 1 << rxq->queue_id;
n_queues_per_vector[i]++;
n_intr_mode_queues_per_vector[i] +=
u64_is_bit_set (ap->intr_mode_per_rxq_bitmap, rxq->queue_id);
}
}
else
{
*im = (virtchnl_irq_map_info_t){
.num_vectors = 1,
.vecmap[0] = {
.vsi_id = ap->vsi_id,
.vector_id = 1,
},
im->vecmap[0] = (virtchnl_vector_map_t){
.vsi_id = ap->vsi_id,
.vector_id = 1,
};
if (enable)
foreach_vnet_dev_port_rx_queue (rxq, port)
@@ -216,36 +226,64 @@ iavf_port_rx_irq_enable_disable (vlib_main_t *vm, vnet_dev_port_t *port,
if ((rv = iavf_vc_op_config_irq_map (vm, dev, im)))
return rv;
for (int i = 0; i < im->num_vectors; i++)
for (int i = 0; i < n_rx_vectors; i++)
{
u32 val;
if (enable == 0)
if (enable == 0 || n_queues_per_vector[i] == 0)
val = dyn_ctln_disabled.as_u32;
else if (ap->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
else if (ap->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR &&
n_intr_mode_queues_per_vector[i] == 0)
val = dyn_ctln_wb_on_itr.as_u32;
else
val = dyn_ctln_enabled.as_u32;
iavf_reg_write (ad, AVFINT_DYN_CTLN (im->vecmap[i].vector_id), val);
iavf_reg_write (ad, IAVF_VFINT_DYN_CTLN (i), val);
log_debug (dev, "VFINT_DYN_CTLN(%u) set to 0x%x", i, val);
}
return rv;
}
/* MSI-X handler for RX vectors (lines >= 1; line 0 is the admin queue).
 * Re-arms the per-vector DYN_CTLN register and schedules the port's RX
 * node on the worker thread that owns this vector.
 * NOTE(review): vectors map 1:1 to threads here (line - 1 == thread
 * index); DYN_CTLN is re-armed with dyn_ctln_enabled unconditionally,
 * even when WB_ON_ITR was selected at config time — confirm intended. */
static void
avf_msix_n_handler (vlib_main_t *vm, vnet_dev_t *dev, u16 line)
{
iavf_device_t *ad = vnet_dev_get_data (dev);
vnet_dev_port_t *port = vnet_dev_get_port_by_id (dev, 0);
line--;
iavf_reg_write (ad, IAVF_VFINT_DYN_CTLN (line), dyn_ctln_enabled.as_u32);
vlib_node_set_interrupt_pending (vlib_get_main_by_index (line),
port->intf.rx_node_index);
}
vnet_dev_rv_t
iavf_port_init (vlib_main_t *vm, vnet_dev_port_t *port)
{
vnet_dev_t *dev = port->dev;
iavf_port_t *ap = vnet_dev_get_port_data (port);
vnet_dev_rv_t rv;
log_debug (port->dev, "port %u", port->port_id);
ap->intr_mode_per_rxq_bitmap = 0;
foreach_vnet_dev_port_rx_queue (q, port)
if (q->interrupt_mode)
u64_bit_set (&ap->intr_mode_per_rxq_bitmap, q->queue_id, 1);
if ((rv = iavf_port_vlan_strip_disable (vm, port)))
return rv;
if ((rv = iavf_port_init_rss (vm, port)))
return rv;
vnet_dev_pci_msix_add_handler (vm, dev, &avf_msix_n_handler, 1,
ap->n_rx_vectors);
vnet_dev_pci_msix_enable (vm, dev, 1, ap->n_rx_vectors);
for (u32 i = 1; i < ap->n_rx_vectors; i++)
vnet_dev_pci_msix_set_polling_thread (vm, dev, i + 1, i);
if (port->dev->poll_stats)
iavf_port_add_counters (vm, port);
@@ -296,7 +334,7 @@ iavf_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
if ((rv = iavf_port_init_vsi_queues (vm, port)))
goto done;
if ((rv = iavf_port_rx_irq_enable_disable (vm, port, /* enable */ 1)))
if ((rv = iavf_port_rx_irq_config (vm, port, /* enable */ 1)))
goto done;
if ((rv = iavf_enable_disable_queues (vm, port, 1)))
@@ -322,7 +360,7 @@ iavf_port_stop (vlib_main_t *vm, vnet_dev_port_t *port)
log_debug (port->dev, "port %u", port->port_id);
iavf_enable_disable_queues (vm, port, /* enable */ 0);
iavf_port_rx_irq_enable_disable (vm, port, /* disable */ 0);
iavf_port_rx_irq_config (vm, port, /* disable */ 0);
if (port->dev->poll_stats)
vnet_dev_poll_port_remove (vm, port, iavf_port_poll_stats);
@@ -387,6 +425,57 @@ iavf_port_add_del_eth_addr (vlib_main_t *vm, vnet_dev_port_t *port,
iavf_vc_op_del_eth_addr (vm, port->dev, &al);
}
/* Enable/disable interrupt mode for one RX queue (or all queues) by
 * editing the port's intr_mode_per_rxq_bitmap. If the port is already
 * started, the new IRQ mapping is applied immediately; on failure the
 * bitmap is rolled back to its previous value.
 *
 * Parameters:
 *   qid   - queue id (ignored when 'all' is set)
 *   state - nonzero to enable, zero to disable
 *   all   - operate on every RX queue of the port
 * Returns VNET_DEV_OK on success or the error from reconfiguration. */
static vnet_dev_rv_t
iavf_port_cfg_rxq_int_mode_change (vlib_main_t *vm, vnet_dev_port_t *port,
u16 qid, u8 state, u8 all)
{
vnet_dev_rv_t rv = VNET_DEV_OK;
iavf_port_t *ap = vnet_dev_get_port_data (port);
vnet_dev_t *dev = port->dev;
char *ed = state ? "ena" : "disa"; /* prefix for "…bled" log messages */
char qstr[16];
u64 old, new = 0;
state = state != 0; /* normalize to 0/1 for u64_bit_set */
old = ap->intr_mode_per_rxq_bitmap;
if (all)
{
snprintf (qstr, sizeof (qstr), "all queues");
/* 'disable all' leaves new == 0; 'enable all' sets a bit per queue */
if (state)
foreach_vnet_dev_port_rx_queue (q, port)
u64_bit_set (&new, q->queue_id, 1);
}
else
{
snprintf (qstr, sizeof (qstr), "queue %u", qid);
new = old;
u64_bit_set (&new, qid, state);
}
if (new == old)
{
log_warn (dev, "interrupt mode already %sbled on %s", ed, qstr);
return rv;
}
ap->intr_mode_per_rxq_bitmap = new;
if (port->started)
{
/* Re-program the IRQ map on the device; restore bitmap on failure. */
if ((rv = iavf_port_rx_irq_config (vm, port, 1)))
{
ap->intr_mode_per_rxq_bitmap = old;
log_err (dev, "failed to %sble interrupt mode on %s", ed, qstr);
return rv;
}
}
/* NOTE(review): bitmap is u64 but is logged with "%x" — high 32 bits
 * may be truncated in the log output; confirm format width. */
log_debug (dev, "interrupt mode %sbled on %s, new bitmap is 0x%x", ed, qstr,
new);
return rv;
}
vnet_dev_rv_t
iavf_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port,
vnet_dev_port_cfg_change_req_t *req)
@@ -434,6 +523,16 @@ iavf_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port,
case VNET_DEV_PORT_CFG_MAX_FRAME_SIZE:
break;
case VNET_DEV_PORT_CFG_RXQ_INTR_MODE_ENABLE:
rv = iavf_port_cfg_rxq_int_mode_change (vm, port, req->queue_id, 1,
req->all_queues);
break;
case VNET_DEV_PORT_CFG_RXQ_INTR_MODE_DISABLE:
rv = iavf_port_cfg_rxq_int_mode_change (vm, port, req->queue_id, 0,
req->all_queues);
break;
default:
return VNET_DEV_ERR_NOT_SUPPORTED;
};
+17 -17
View File
@@ -19,23 +19,23 @@ enum
#undef _
};
#define AVFINT_DYN_CTLN(x) (0x00003800 + (0x4 * x))
#define AVFINT_ICR0 0x00004800
#define AVFINT_ICR0_ENA1 0x00005000
#define AVFINT_DYN_CTL0 0x00005C00
#define IAVF_ARQBAH 0x00006000
#define IAVF_ATQH 0x00006400
#define IAVF_ATQLEN 0x00006800
#define IAVF_ARQBAL 0x00006C00
#define IAVF_ARQT 0x00007000
#define IAVF_ARQH 0x00007400
#define IAVF_ATQBAH 0x00007800
#define IAVF_ATQBAL 0x00007C00
#define IAVF_ARQLEN 0x00008000
#define IAVF_ATQT 0x00008400
#define VFGEN_RSTAT 0x00008800
#define IAVF_QTX_TAIL(q) (0x00000000 + (0x4 * q))
#define IAVF_QRX_TAIL(q) (0x00002000 + (0x4 * q))
#define IAVF_VFINT_DYN_CTLN(x) (0x00003800 + (0x4 * x))
#define IAVF_VFINT_ICR0 0x00004800
#define IAVF_VFINT_ICR0_ENA1 0x00005000
#define IAVF_VFINT_DYN_CTL0 0x00005C00
#define IAVF_ARQBAH 0x00006000
#define IAVF_ATQH 0x00006400
#define IAVF_ATQLEN 0x00006800
#define IAVF_ARQBAL 0x00006C00
#define IAVF_ARQT 0x00007000
#define IAVF_ARQH 0x00007400
#define IAVF_ATQBAH 0x00007800
#define IAVF_ATQBAL 0x00007C00
#define IAVF_ARQLEN 0x00008000
#define IAVF_ATQT 0x00008400
#define IAVF_VFGEN_RSTAT 0x00008800
#define IAVF_QTX_TAIL(q) (0x00000000 + (0x4 * q))
#define IAVF_QRX_TAIL(q) (0x00002000 + (0x4 * q))
#define foreach_virtchnl_op \
_ (0, UNKNOWN) \