Handle dynamic enable/disable of the VHOST_USER interface

If the number of queues enabled by the guest is less
    than the number of queues exported to the guest, then all
    threads route packets to Q0, on the assumption
    that the guest is not interested in performance.
    If all queues are enabled, each thread queues packets
    in its own TX queue of the VHOST_USER interface,
    boosting performance.

Change-Id: Ic24bb8c0505b11c7513aeecd21c9ec5da5f90138
Signed-off-by: Shesha Sreenivasamurthy <shesha@cisco.com>
This commit is contained in:
Shesha Sreenivasamurthy
2016-02-19 13:36:53 -08:00
committed by Gerrit Code Review
parent eef4d99bcd
commit 9ad5adcf08
4 changed files with 29 additions and 11 deletions

View File

@ -221,7 +221,8 @@ u32 tx_burst_vector_internal (vlib_main_t * vm,
* This device only supports one TX queue,
* and we're running multi-threaded...
*/
if (PREDICT_FALSE(xd->lockp != 0))
if (PREDICT_FALSE(xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
xd->lockp != 0))
{
queue_id = queue_id % xd->tx_q_used;
while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
@ -264,8 +265,12 @@ u32 tx_burst_vector_internal (vlib_main_t * vm,
else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
{
u32 offset = 0;
if (xd->need_txlock) {
queue_id = 0;
while (__sync_lock_test_and_set (xd->lockp[queue_id], 1));
}
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
if (PREDICT_TRUE(xd->lockp == NULL)) {
else {
dpdk_device_and_queue_t * dq;
vec_foreach (dq, dm->devices_by_cpu[vm->cpu_index])
{
@ -274,8 +279,6 @@ u32 tx_burst_vector_internal (vlib_main_t * vm,
}
assert (dq);
offset = dq->queue_id * VIRTIO_QNUM;
} else {
offset = queue_id * VIRTIO_QNUM;
}
#endif
if (PREDICT_TRUE(tx_head > tx_tail))
@ -331,6 +334,9 @@ u32 tx_burst_vector_internal (vlib_main_t * vm,
n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
}
if (xd->need_txlock)
*xd->lockp[queue_id] = 0;
}
#if RTE_LIBRTE_KNI
else if (xd->dev_type == VNET_DPDK_DEV_KNI)
@ -370,7 +376,8 @@ u32 tx_burst_vector_internal (vlib_main_t * vm,
rv = 0;
}
if (PREDICT_FALSE(xd->lockp != 0))
if (PREDICT_FALSE(xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
xd->lockp != 0))
*xd->lockp[queue_id] = 0;
if (PREDICT_FALSE(rv < 0))

View File

@ -246,6 +246,7 @@ typedef struct {
dpdk_port_type_t port_type;
dpdk_efd_agent_t efd_agent;
u8 need_txlock; /* Used by VNET_DPDK_DEV_VHOST_USER */
} dpdk_device_t;
#define MAX_NELTS 32

View File

@ -215,6 +215,7 @@ dpdk_device_lock_init(dpdk_device_t * xd)
CLIB_CACHE_LINE_BYTES);
memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES);
}
xd->need_txlock = 1;
}
void
@ -226,6 +227,7 @@ dpdk_device_lock_free(dpdk_device_t * xd)
clib_mem_free((void *) xd->lockp[q]);
vec_free(xd->lockp);
xd->lockp = 0;
xd->need_txlock = 0;
}
static clib_error_t *

View File

@ -198,7 +198,7 @@ dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 *hwaddr)
int num_qpairs = 1;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
num_qpairs = dm->use_rss < 1 ? 1 : dm->use_rss;
num_qpairs = dm->use_rss < 1 ? 1 : tm->n_vlib_mains;
#endif
dpdk_device_t * xd = NULL;
@ -242,9 +242,7 @@ dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 *hwaddr)
// reset lockp
dpdk_device_lock_free(xd);
if (xd->tx_q_used < tm->n_vlib_mains)
dpdk_device_lock_init(xd);
dpdk_device_lock_init(xd);
// reset tx vectors
for (j = 0; j < tm->n_vlib_mains; j++)
@ -301,8 +299,7 @@ dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 *hwaddr)
xd->vu_vhost_dev.virtqueue[j]->backend = -1;
}
if (xd->tx_q_used < dm->input_cpu_count)
dpdk_device_lock_init(xd);
dpdk_device_lock_init(xd);
DBG_SOCK("tm->n_vlib_mains: %d. TX %d, RX: %d, num_qpairs: %d, Lock: %p",
tm->n_vlib_mains, xd->tx_q_used, xd->rx_q_used, num_qpairs, xd->lockp);
@ -706,6 +703,17 @@ int dpdk_vhost_user_set_vring_enable(u32 hw_if_index, u8 idx, int enable)
*/
vui->vrings[idx].enabled = enable; /* Save local copy */
int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
while (numqs--) {
if (! vui->vrings[numqs].enabled)
break;
}
if (numqs == -1) /* All Qs are enabled */
xd->need_txlock = 0;
else
xd->need_txlock = 1;
vq = xd->vu_vhost_dev.virtqueue[idx];
if (vq->desc && vq->avail && vq->used)
xd->vu_vhost_dev.virtqueue[idx]->enabled = enable;