dpdk: bump to dpdk 19.02

HQoS requires fixes to work with DPDK 19.02, so the code is disabled and
is pending deprecation unless an active maintainer is found.

Change-Id: I3569c4287b6dfdd2c29e02375eb53bf01fa6ae84
Signed-off-by: Damjan Marion <damarion@cisco.com>
Author: Damjan Marion (committed by Damjan Marion)
Date: 2019-02-02 16:28:16 +01:00
Commit: 1a6ece3435 (parent 773291163a)
15 changed files with 35 additions and 1255 deletions


@@ -21,12 +21,12 @@ DPDK_MLX5_PMD_DLOPEN_DEPS ?= n
DPDK_TAP_PMD ?= n
DPDK_FAILSAFE_PMD ?= n
-DPDK_VERSION ?= 18.11
+DPDK_VERSION ?= 19.02
DPDK_BASE_URL ?= http://fast.dpdk.org/rel
DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz
DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL)
-DPDK_18.08_TARBALL_MD5_CKSUM := da5e7fb25ab063c47e53929fb8c58be5
DPDK_18.11_TARBALL_MD5_CKSUM := 04b86f4a77f4f81a7fbd26467dd2ea9f
+DPDK_19.02_TARBALL_MD5_CKSUM := 23944a2cdee061aa4bd72ebe7d836db0
MACHINE=$(shell uname -m)
# replace dot with space, and if 3rd word exists we deal with stable dpdk rel


@@ -1,28 +0,0 @@
From 65a8641604212d58defd71491c900d84d662a086 Mon Sep 17 00:00:00 2001
From: Damjan Marion <damarion@cisco.com>
Date: Wed, 6 Jun 2018 21:57:58 +0200
Subject: [PATCH] i40evf: don't reset device_info data
At this point valid data is already set by rte_eth_get_device_info.
The device field becomes zero and the consumer is not able to retrieve PCI data.
Signed-off-by: Damjan Marion <damarion@cisco.com>
---
drivers/net/i40e/i40e_ethdev_vf.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 804e44530..86b38d202 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2182,7 +2182,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
--
2.17.1
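
For context on why removing that memset matters, here is a minimal consumer-side sketch (illustrative only, with a hypothetical helper name; not code from VPP or from the patch) of how an application reaches PCI data through dev_info. With the extra memset in place, dev_info.device comes back NULL and the lookup below fails.

#include <rte_ethdev.h>
#include <rte_bus_pci.h>

/* Hypothetical helper: resolve the PCI address behind an ethdev port.
 * Assumes the port is PCI-backed; returns 0 on success, -1 otherwise. */
static int
port_to_pci_addr(uint16_t port_id, struct rte_pci_addr *addr)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* The stray memset in the PMD wiped this pointer after ethdev set it. */
	if (dev_info.device == NULL)
		return -1;
	*addr = RTE_DEV_TO_PCI(dev_info.device)->addr;
	return 0;
}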


@@ -1,13 +0,0 @@
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index e7e9256e5..2fb0a072c 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -5296,7 +5296,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
* Section 73.10.2, we may have to wait up to 500ms if KR is
* attempted. 82599 uses the same timing for 10g SFI.
*/
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 10; i++) {
/* Wait for the link partner to also set speed */
msec_delay(100);


@@ -1,270 +0,0 @@
From bd42c77c457146bede32333558b4e0414b30683e Mon Sep 17 00:00:00 2001
From: Yongseok Koh <yskoh@mellanox.com>
Date: Fri, 24 Aug 2018 16:46:49 -0700
Subject: [PATCH] net/mlx5: support externally allocated mempool
When MLX PMD registers memory for DMA, it accesses the global memseg list
of DPDK to maximize the range of registration so that LKey search can be
more efficient. Granularity of MR registration is per page.
Externally allocated memory shouldn't be used for DMA because it can't be
searched in the memseg list and its free event can't be tracked by DPDK.
However, if the external memory is static (allocated on startup and never
freed), such memory can also be registered with a little tweak in the code.
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
drivers/net/mlx5/mlx5_mr.c | 155 +++++++++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_rxtx.h | 35 +++++++++-
2 files changed, 189 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 08105a443..876622e91 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -277,6 +277,23 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
uintptr_t end = 0;
uint32_t idx = 0;
+ /* MR for external memory doesn't have memseg list. */
+ if (mr->msl == NULL) {
+ struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+ assert(mr->ms_bmp_n == 1);
+ assert(mr->ms_n == 1);
+ assert(base_idx == 0);
+ /*
+ * Can't search it from memseg list but get it directly from
+ * verbs MR as there's only one chunk.
+ */
+ entry->start = (uintptr_t)ibv_mr->addr;
+ entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ /* Returning 1 ends iteration. */
+ return 1;
+ }
for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
if (rte_bitmap_get(mr->ms_bmp, idx)) {
const struct rte_memseg_list *msl;
@@ -818,6 +835,7 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
mr = mr_lookup_dev_list(dev, &entry, start);
if (mr == NULL)
continue;
+ assert(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
assert(ms != NULL);
assert(msl->page_sz == ms->hugepage_sz);
@@ -1070,6 +1088,139 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
(void *)mr_ctrl, mr_ctrl->cur_gen);
}
+/**
+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
+ *
+ * Externally allocated chunk is registered and a MR is created for the chunk.
+ * The MR object is added to the global list. If memseg list of a MR object
+ * (mr->msl) is null, the MR object can be regarded as externally allocated
+ * memory.
+ *
+ * Once external memory is registered, it should be static. If the memory is
+ * freed and the virtual address range has different physical memory mapped
+ * again, it may cause crash on device due to the wrong translation entry. PMD
+ * can't track the free event of the external memory for now.
+ */
+static void
+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
+ struct mlx5_mr *mr = NULL;
+ uintptr_t addr = (uintptr_t)memhdr->addr;
+ size_t len = memhdr->len;
+ struct mlx5_mr_cache entry;
+ uint32_t lkey;
+
+ /* If already registered, it should return. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_lookup_dev(dev, &entry, addr);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ if (lkey != UINT32_MAX)
+ return;
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u unable to allocate memory for a new MR of"
+ " mempool (%s).",
+ dev->data->port_id, mp->name);
+ data->ret = -1;
+ return;
+ }
+ DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
+ dev->data->port_id, mem_idx, mp->name);
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_free(mr);
+ data->ret = -1;
+ return;
+ }
+ mr->msl = NULL; /* Mark it is external memory. */
+ mr->ms_bmp = NULL;
+ mr->ms_n = 1;
+ mr->ms_bmp_n = 1;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DRV_LOG(DEBUG,
+ "port %u MR CREATED (%p) for external memory %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Insert to the local cache table */
+ mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
+}
+
+/**
+ * Register MR for entire memory chunks in a Mempool having externally allocated
+ * memory and fill in local cache.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static uint32_t
+mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
+ return data.ret;
+}
+
+/**
+ * Register MR for entire memory chunks in a Mempool having externally allocated
+ * memory and search LKey of the address to return.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Search key.
+ * @param mp
+ * Pointer to registering Mempool where addr belongs.
+ *
+ * @return
+ * LKey for address on success, UINT32_MAX on failure.
+ */
+uint32_t
+mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq_ctrl->priv;
+
+ mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
+ return mlx5_tx_addr2mr_bh(txq, addr);
+}
+
/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
@@ -1113,6 +1264,10 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
};
rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+ if (data.ret < 0 && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
+ }
return data.ret;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f53bb43c3..b61c23b33 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -347,6 +347,8 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp);
#ifndef NDEBUG
/**
@@ -534,6 +536,24 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
+ * cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
+/**
* Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
* as mempool is pre-configured and static.
*
@@ -591,7 +611,20 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
return mlx5_tx_addr2mr_bh(txq, addr);
}
-#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
+
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ if (rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+ }
+ return lkey;
+}
/**
* Ring TX queue doorbell and flush the update if requested.
--
2.11.0


@@ -1,250 +0,0 @@
From c947fd2ec67e9bbacb8b106f320f6e6bae5a9731 Mon Sep 17 00:00:00 2001
From: Matthew Smith <mgsmith@netgate.com>
Date: Tue, 28 Aug 2018 13:21:04 -0500
Subject: [PATCH] mlx4: support externally allocated mempool
Port Mellanox mlx5 PMD patch to work for mlx4 PMD.
Signed-off-by: Matthew Smith <mgsmith@netgate.com>
---
drivers/net/mlx4/mlx4_mr.c | 150 +++++++++++++++++++++++++++++++++++++++++++
drivers/net/mlx4/mlx4_rxtx.h | 35 +++++++++-
2 files changed, 184 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index d23d3c613..55e5555ce 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -289,6 +289,23 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
uintptr_t end = 0;
uint32_t idx = 0;
+ /* MR for external memory doesn't have memseg list. */
+ if (mr->msl == NULL) {
+ struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+ assert(mr->ms_bmp_n == 1);
+ assert(mr->ms_n == 1);
+ assert(base_idx == 0);
+ /*
+ * Can't search it from memseg list but get it directly from
+ * verbs MR as there's only one chunk.
+ */
+ entry->start = (uintptr_t)ibv_mr->addr;
+ entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ /* Returning 1 ends iteration. */
+ return 1;
+ }
for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
if (rte_bitmap_get(mr->ms_bmp, idx)) {
const struct rte_memseg_list *msl;
@@ -809,6 +826,7 @@ mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
mr = mr_lookup_dev_list(dev, &entry, start);
if (mr == NULL)
continue;
+ assert(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
assert(ms != NULL);
assert(msl->page_sz == ms->hugepage_sz);
@@ -1055,6 +1073,134 @@ mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
(void *)mr_ctrl, mr_ctrl->cur_gen);
}
+/**
+ * Called during rte_mempool_mem_iter() by mlx4_mr_update_ext_mp().
+ *
+ * Externally allocated chunk is registered and a MR is created for the chunk.
+ * The MR object is added to the global list. If memseg list of a MR object
+ * (mr->msl) is null, the MR object can be regarded as externally allocated
+ * memory.
+ *
+ * Once external memory is registered, it should be static. If the memory is
+ * freed and the virtual address range has different physical memory mapped
+ * again, it may cause crash on device due to the wrong translation entry. PMD
+ * can't track the free event of the external memory for now.
+ */
+static void
+mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
+ struct mlx4_mr *mr = NULL;
+ uintptr_t addr = (uintptr_t)memhdr->addr;
+ size_t len = memhdr->len;
+ struct mlx4_mr_cache entry;
+ uint32_t lkey;
+
+ /* If already registered, it should return. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_lookup_dev(dev, &entry, addr);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ if (lkey != UINT32_MAX)
+ return;
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (mr == NULL) {
+ WARN("port %u unable to allocate memory for a new MR of"
+ " mempool (%s).",
+ dev->data->port_id, mp->name);
+ data->ret = -1;
+ return;
+ }
+ DEBUG("port %u register MR for chunk #%d of mempool (%s)",
+ dev->data->port_id, mem_idx, mp->name);
+ mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ WARN("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_free(mr);
+ data->ret = -1;
+ return;
+ }
+ mr->msl = NULL; /* Mark it is external memory. */
+ mr->ms_bmp = NULL;
+ mr->ms_n = 1;
+ mr->ms_bmp_n = 1;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for external memory %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Insert to the local cache table */
+ mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr);
+}
+
+/**
+ * Register MR for entire memory chunks in a Mempool having externally allocated
+ * memory and fill in local cache.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static uint32_t
+mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data);
+ return data.ret;
+}
+
+/**
+ * Register MR for entire memory chunks in a Mempool having externally allocated
+ * memory and search LKey of the address to return.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Search key.
+ * @param mp
+ * Pointer to registering Mempool where addr belongs.
+ *
+ * @return
+ * LKey for address on success, UINT32_MAX on failure.
+ */
+uint32_t
+mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
+ struct rte_mempool *mp)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq->priv;
+
+ mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
+ return mlx4_tx_addr2mr_bh(txq, addr);
+}
+
/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
static void
mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
@@ -1098,6 +1244,10 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
};
rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
+ if (data.ret < 0 && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp);
+ }
return data.ret;
}
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index ffa8abfca..1be060cda 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -163,6 +163,26 @@ void mlx4_tx_queue_release(void *dpdk_txq);
void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
+ struct rte_mempool *mp);
+
+/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
+ * cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx4_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
/**
* Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
@@ -222,6 +242,19 @@ mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
return mlx4_tx_addr2mr_bh(txq, addr);
}
-#define mlx4_tx_mb2mr(rxq, mb) mlx4_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey = mlx4_tx_addr2mr(txq, addr);
+
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ if (rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ lkey = mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+ }
+ return lkey;
+}
#endif /* MLX4_RXTX_H_ */
--
2.15.2 (Apple Git-101.1)


@@ -1,39 +0,0 @@
From ba9b381c532fe57c726752b7db0ab45ab7726c90 Mon Sep 17 00:00:00 2001
From: Matthew Smith <mgsmith@netgate.com>
Date: Fri, 13 Jul 2018 16:35:57 -0500
Subject: [PATCH] ixgbe: wait longer for link after fiber MAC setup
After setting up the link on a fiber port, the maximum wait time for
the link to come up is 500 ms in ixgbe_setup_mac_link_multispeed_fiber().
On an x550 SFP+ port, this is often not sufficiently long for the link
to come up. This can result in never being able to retrieve accurate
link status for the port using rte_eth_link_get_nowait().
Increase the maximum wait time in ixgbe_setup_mac_link_multispeed_fiber()
to 1 s.
Bugzilla ID: 69
Fixes: f3430431abaf ("ixgbe/base: add SFP+ dual-speed support")
Cc: stable@dpdk.org
Signed-off-by: Matthew Smith <mgsmith@netgate.com>
---
drivers/net/ixgbe/base/ixgbe_common.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index e7e9256e5..2fb0a072c 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -5296,7 +5296,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
* Section 73.10.2, we may have to wait up to 500ms if KR is
* attempted. 82599 uses the same timing for 10g SFI.
*/
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 10; i++) {
/* Wait for the link partner to also set speed */
msec_delay(100);
--
2.15.2 (Apple Git-101.1)
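
As a reference point for the commit message above, a rough application-side sketch (hypothetical names, not part of this commit) of the polling pattern it mentions: if the MAC-setup routine gives up after 500 ms, a loop like this can keep reporting the link as down on x550 SFP+ ports even though the link eventually comes up.

#include <rte_ethdev.h>
#include <rte_cycles.h>

/* Hypothetical poll loop: wait up to ~2 s for a port to report link up,
 * using the non-blocking query named in the commit message. */
static int
wait_for_link_up(uint16_t port_id)
{
	struct rte_eth_link link;
	int i;

	for (i = 0; i < 20; i++) {
		rte_eth_link_get_nowait(port_id, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;
		rte_delay_ms(100);
	}
	return -1; /* still reported down */
}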


@@ -1,30 +0,0 @@
From b47596e5d3db40b967b4d67207bb2d87db214f2e Mon Sep 17 00:00:00 2001
From: Damjan Marion <damarion@cisco.com>
Date: Tue, 25 Sep 2018 10:02:58 +0200
Subject: [PATCH] net/i40e: fix 25G AOC and ACC cable detection on XXV710
Fixes: 75d133dd3296 ("net/i40e: enable 25G device")
Cc: stable@dpdk.org
Signed-off-by: Damjan Marion <damarion@cisco.com>
---
drivers/net/i40e/i40e_ethdev.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3fffe5a55..b876933e5 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1393,6 +1393,8 @@ i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
(((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \
((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \
((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
- ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_AOC) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_ACC))
#endif /* _I40E_ETHDEV_H_ */
--
2.17.1


@@ -1,39 +0,0 @@
From c030dc71ff4069c5b5e5b7889a2926617075f6e3 Mon Sep 17 00:00:00 2001
From: Stephen Hemminger <stephen@networkplumber.org>
Date: Thu, 25 Oct 2018 10:33:01 -0700
Subject: [PATCH] mlx5: use pkg-config to handle SUSE libmnl
SUSE decided to install the libmnl include file in a non-standard
place: /usr/include/libmnl/libmnl/libmnl.h
This was probably a mistake by the SUSE package maintainer,
but hard to get fixed. Work around the problem by using pkg-config to find
the necessary include directive for libmnl.
Fixes: 20b71e92ef8e ("net/mlx5: lay groundwork for switch offloads")
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/mlx5/Makefile | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 1e9c0b42ac16..8497c98ef86e 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -51,6 +51,7 @@ CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
+CFLAGS += $(shell pkg-config --cflags libmnl)
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
@@ -57,7 +58,7 @@ LDLIBS += -ldl
else
LDLIBS += -libverbs -lmlx5
endif
-LDLIBS += -lmnl
+LDLIBS += $(shell pkg-config --libs libmnl)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci


@@ -1,203 +0,0 @@
commit 6b2a47d
Author: Jia Yu <jyu@vmware.com>
AuthorDate: Sun Aug 19 22:18:45 2018 -0700
Commit: Ferruh Yigit <ferruh.yigit@intel.com>
CommitDate: Tue Aug 28 15:27:39 2018 +0200
net/bonding: fix buffer corruption in packets
When bond slave devices cannot transmit all packets in the bufs array,
the tx_burst callback shall merge the un-transmitted packets back into
the bufs array. Recent merge logic introduced a bug which causes
invalid mbuf addresses to be written to the bufs array.
When the caller frees the un-transmitted packets, the invalid addresses
make the application crash.
The fix is to avoid shifting mbufs and to write the un-transmitted
packets directly back to the bufs array.
Fixes: 09150784a776 ("net/bonding: burst mode hash calculation")
Cc: stable@dpdk.org
Signed-off-by: Jia Yu <jyu@vmware.com>
Acked-by: Chas Williams <chas3@att.com>
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 4417422..b84f322 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -301,10 +301,10 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
@@ -359,34 +359,12 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow reordering
- * later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++) {
- slave_bufs[i][j] =
- slave_bufs[i][(slave_tx_count - 1) + j];
- }
- }
- }
-
- /*
- * If there are tx burst failures we move packets to end of bufs to
- * preserve expected PMD behaviour of all failed transmitted being
- * at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- bufs[bufs_idx++] = slave_bufs[i][j];
- }
+ total_tx_fail_count += slave_tx_fail_count;
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
@@ -716,8 +694,8 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
tx_fail_total += tx_fail_slave;
memcpy(&bufs[nb_pkts - tx_fail_total],
- &slave_bufs[i][num_tx_slave],
- tx_fail_slave * sizeof(bufs[0]));
+ &slave_bufs[i][num_tx_slave],
+ tx_fail_slave * sizeof(bufs[0]));
}
num_tx_total += num_tx_slave;
}
@@ -1222,10 +1200,10 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[nb_bufs];
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
@@ -1266,34 +1244,12 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow reordering
- * later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++) {
- slave_bufs[i][j] =
- slave_bufs[i][(slave_tx_count - 1) + j];
- }
- }
- }
-
- /*
- * If there are tx burst failures we move packets to end of bufs to
- * preserve expected PMD behaviour of all failed transmitted being
- * at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- bufs[bufs_idx++] = slave_bufs[i][j];
- }
+ total_tx_fail_count += slave_tx_fail_count;
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
@@ -1320,10 +1276,10 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
@@ -1381,39 +1337,13 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow
- * reordering later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- slave_bufs[i][j] =
- slave_bufs[i]
- [(slave_tx_count - 1)
- + j];
- }
- }
+ total_tx_fail_count += slave_tx_fail_count;
- /*
- * If there are tx burst failures we move packets to end of
- * bufs to preserve expected PMD behaviour of all failed
- * transmitted being at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0;
- j < slave_tx_fail_count[i];
- j++) {
- bufs[bufs_idx++] =
- slave_bufs[i][j];
- }
- }
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
}
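
To make the convention behind the fix explicit, here is a heavily simplified sketch (illustrative only, with hypothetical names; not the driver code): for each slave, the mbuf pointers that were not transmitted are copied straight to the tail of the caller's bufs array, so every entry past the returned count is a valid mbuf the caller may free or retry. The diff above replaces the intermediate shift inside slave_bufs with exactly this kind of direct copy.

#include <string.h>
#include <rte_mbuf.h>

/* Simplified model of the tx_burst tail handling: append one slave's
 * un-transmitted mbufs to the end of the caller-visible bufs[] array. */
static inline uint16_t
collect_tx_failures(struct rte_mbuf **bufs, uint16_t nb_bufs,
		    struct rte_mbuf **slave_bufs, uint16_t slave_nb_bufs,
		    uint16_t slave_tx_count, uint16_t *total_tx_fail_count)
{
	uint16_t fail = slave_nb_bufs - slave_tx_count;

	if (fail == 0)
		return 0;
	*total_tx_fail_count += fail;
	/* Copy the pointers that were NOT sent, i.e. starting at
	 * slave_tx_count, directly to the tail of bufs[]. */
	memcpy(&bufs[nb_bufs - *total_tx_fail_count],
	       &slave_bufs[slave_tx_count],
	       fail * sizeof(bufs[0]));
	return fail;
}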