buffers: major cleanup and improvements

This patch introduces the following changes:
- deprecated free lists, which are not used and are not compatible
  with external buffer managers (e.g. DPDK)
- introduces native support for per-numa buffer pools
- significantly improves performance of buffer alloc and free

Change-Id: I4a8e723ae47056717afd6cac0efe87cb731b5be7
Signed-off-by: Damjan Marion <damarion@cisco.com>
This commit is contained in:
Damjan Marion
2019-01-21 11:48:34 +01:00
committed by Dave Barach
parent 4fd5a9d3e6
commit 910d3694e8
26 changed files with 1036 additions and 1411 deletions
+1
View File
@@ -102,6 +102,7 @@ typedef struct
u32 *bufs;
u16 n_enqueued;
u8 int_mode;
u8 buffer_pool_index;
} avf_rxq_t;
typedef struct
+5 -1
View File
@@ -229,6 +229,9 @@ avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
2 * CLIB_CACHE_LINE_BYTES,
ad->numa_node);
rxq->buffer_pool_index =
vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
if (rxq->descs == 0)
return vlib_physmem_last_error (vm);
@@ -239,7 +242,8 @@ avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
n_alloc = vlib_buffer_alloc (vm, rxq->bufs, rxq->size - 8);
n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
rxq->buffer_pool_index);
if (n_alloc == 0)
return clib_error_return (0, "buffer allocation error");
+4 -1
View File
@@ -72,7 +72,9 @@ avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
slot = (rxq->next - n_refill - 1) & mask;
n_refill &= ~7; /* round to 8 */
n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, size, n_refill);
n_alloc =
vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
rxq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_refill))
{
@@ -368,6 +370,7 @@ no_more_desc:
vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
bt->buffer_pool_index = rxq->buffer_pool_index;
if (n_tail_desc)
n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
+311 -519
View File
File diff suppressed because it is too large Load Diff
+4 -2
View File
@@ -19,8 +19,10 @@
#define rte_mbuf_from_vlib_buffer(x) (((struct rte_mbuf *)x) - 1)
#define vlib_buffer_from_rte_mbuf(x) ((vlib_buffer_t *)(x+1))
clib_error_t *dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
unsigned socket_id);
extern struct rte_mempool **dpdk_mempool_by_buffer_pool_index;
extern struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index;
clib_error_t *dpdk_buffer_pools_create (vlib_main_t * vm);
#endif /* include_dpdk_buffer_h */
+19 -72
View File
@@ -382,27 +382,27 @@ static clib_error_t *
show_dpdk_buffer (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_command_t * cmd)
{
struct rte_mempool *rmp;
int i;
vlib_buffer_main_t *bm = vm->buffer_main;
vlib_buffer_pool_t *bp;
for (i = 0; i < vec_len (dpdk_main.pktmbuf_pools); i++)
{
rmp = dpdk_main.pktmbuf_pools[i];
if (rmp)
{
unsigned count = rte_mempool_avail_count (rmp);
unsigned free_count = rte_mempool_in_use_count (rmp);
vec_foreach (bp, bm->buffer_pools)
{
struct rte_mempool *rmp = dpdk_mempool_by_buffer_pool_index[bp->index];
if (rmp)
{
unsigned count = rte_mempool_avail_count (rmp);
unsigned free_count = rte_mempool_in_use_count (rmp);
vlib_cli_output (vm,
"name=\"%s\" available = %7d allocated = %7d total = %7d\n",
rmp->name, (u32) count, (u32) free_count,
(u32) (count + free_count));
}
else
{
vlib_cli_output (vm, "rte_mempool is NULL (!)\n");
}
}
vlib_cli_output (vm,
"name=\"%s\" available = %7d allocated = %7d total = %7d\n",
rmp->name, (u32) count, (u32) free_count,
(u32) (count + free_count));
}
else
{
vlib_cli_output (vm, "rte_mempool is NULL (!)\n");
}
}
return 0;
}
@@ -2018,59 +2018,6 @@ VLIB_CLI_COMMAND (show_vpe_version_command, static) = {
};
/* *INDENT-ON* */
#if CLI_DEBUG
static clib_error_t *
dpdk_validate_buffers_fn (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_command_t * cmd_arg)
{
u32 n_invalid_bufs = 0, uninitialized = 0;
u32 is_poison = 0, is_test = 0;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "poison"))
is_poison = 1;
else if (unformat (input, "trajectory"))
is_test = 1;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
}
if (VLIB_BUFFER_TRACE_TRAJECTORY == 0)
{
vlib_cli_output (vm, "Trajectory not enabled. Recompile with "
"VLIB_BUFFER_TRACE_TRAJECTORY 1");
return 0;
}
if (is_poison)
{
dpdk_buffer_poison_trajectory_all ();
}
if (is_test)
{
n_invalid_bufs = dpdk_buffer_validate_trajectory_all (&uninitialized);
if (!n_invalid_bufs)
vlib_cli_output (vm, "All buffers are valid %d uninitialized",
uninitialized);
else
vlib_cli_output (vm, "Found %d invalid buffers and %d uninitialized",
n_invalid_bufs, uninitialized);
}
return 0;
}
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (test_dpdk_buffers_command, static) =
{
.path = "test dpdk buffers",
.short_help = "test dpdk buffers [poison] [trajectory]",
.function = dpdk_validate_buffers_fn,
};
/* *INDENT-ON* */
#endif
clib_error_t *
dpdk_cli_init (vlib_main_t * vm)
{
+9 -11
View File
@@ -40,6 +40,7 @@ void
dpdk_device_setup (dpdk_device_t * xd)
{
dpdk_main_t *dm = &dpdk_main;
vlib_main_t *vm = vlib_get_main ();
vnet_main_t *vnm = vnet_get_main ();
vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->sw_if_index);
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
@@ -116,26 +117,23 @@ dpdk_device_setup (dpdk_device_t * xd)
CLIB_CACHE_LINE_BYTES);
for (j = 0; j < xd->rx_q_used; j++)
{
dpdk_mempool_private_t *privp;
uword tidx = vnet_get_device_input_thread_index (dm->vnet_main,
xd->hw_if_index, j);
unsigned lcore = vlib_worker_threads[tidx].cpu_id;
u16 socket_id = rte_lcore_to_socket_id (lcore);
u8 bpidx = vlib_buffer_pool_get_default_for_numa (vm, socket_id);
vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, bpidx);
struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[bpidx];
rv =
rte_eth_rx_queue_setup (xd->port_id, j, xd->nb_rx_desc,
xd->cpu_socket, 0,
dm->pktmbuf_pools[socket_id]);
rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->nb_rx_desc,
xd->cpu_socket, 0, mp);
/* retry with any other CPU socket */
if (rv < 0)
rv =
rte_eth_rx_queue_setup (xd->port_id, j,
xd->nb_rx_desc, SOCKET_ID_ANY, 0,
dm->pktmbuf_pools[socket_id]);
rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->nb_rx_desc,
SOCKET_ID_ANY, 0, mp);
privp = rte_mempool_get_priv (dm->pktmbuf_pools[socket_id]);
xd->buffer_pool_for_queue[j] = privp->buffer_pool_index;
xd->buffer_pool_for_queue[j] = bp->index;
if (rv < 0)
dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
+3 -5
View File
@@ -127,11 +127,9 @@ dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
mb->pkt_len = b->current_length;
mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
first_mb->nb_segs++;
if (PREDICT_FALSE (b->n_add_refs))
{
rte_mbuf_refcnt_update (mb, b->n_add_refs);
b->n_add_refs = 0;
}
if (PREDICT_FALSE (b->ref_count > 1))
mb->pool =
dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
}
}
+1 -6
View File
@@ -56,8 +56,6 @@
#include <vlib/pci/pci.h>
#include <vnet/flow/flow.h>
#define NB_MBUF (16<<10)
extern vnet_device_class_t dpdk_device_class;
extern vlib_node_registration_t dpdk_input_node;
extern vlib_node_registration_t admin_up_down_process_node;
@@ -364,7 +362,7 @@ typedef struct
u8 nchannels_set_manually;
u32 coremask;
u32 nchannels;
u32 num_mbufs;
u32 num_crypto_mbufs;
/*
* format interface names ala xxxEthernet%d/%d/%d instead of
@@ -443,9 +441,6 @@ typedef struct
vnet_main_t *vnet_main;
dpdk_config_main_t *conf;
/* mempool */
struct rte_mempool **pktmbuf_pools;
/* API message ID base */
u16 msg_id_base;
-8
View File
@@ -49,14 +49,6 @@ _(file-prefix) \
_(vdev) \
_(log-level)
typedef struct
{
/* must be first */
struct rte_pktmbuf_pool_private mbp_priv;
u8 buffer_pool_index;
} dpdk_mempool_private_t;
static inline void
dpdk_get_xstats (dpdk_device_t * xd)
{
+3 -91
View File
@@ -151,60 +151,6 @@ dpdk_device_lock_init (dpdk_device_t * xd)
}
}
static struct rte_mempool_ops *
get_ops_by_name (char *ops_name)
{
u32 i;
for (i = 0; i < rte_mempool_ops_table.num_ops; i++)
{
if (!strcmp (ops_name, rte_mempool_ops_table.ops[i].name))
return &rte_mempool_ops_table.ops[i];
}
return 0;
}
static int
dpdk_ring_alloc (struct rte_mempool *mp)
{
u32 rg_flags = 0, count;
i32 ret;
char rg_name[RTE_RING_NAMESIZE];
struct rte_ring *r;
ret = snprintf (rg_name, sizeof (rg_name), RTE_MEMPOOL_MZ_FORMAT, mp->name);
if (ret < 0 || ret >= (i32) sizeof (rg_name))
return -ENAMETOOLONG;
/* ring flags */
if (mp->flags & MEMPOOL_F_SP_PUT)
rg_flags |= RING_F_SP_ENQ;
if (mp->flags & MEMPOOL_F_SC_GET)
rg_flags |= RING_F_SC_DEQ;
count = rte_align32pow2 (mp->size + 1);
/*
* Allocate the ring that will be used to store objects.
* Ring functions will return appropriate errors if we are
* running as a secondary process etc., so no checks made
* in this function for that condition.
*/
/* XXX can we get memory from the right socket? */
r = clib_mem_alloc_aligned (rte_ring_get_memsize (count),
CLIB_CACHE_LINE_BYTES);
/* XXX rte_ring_lookup will not work */
ret = rte_ring_init (r, rg_name, count, rg_flags);
if (ret)
return ret;
mp->pool_data = r;
return 0;
}
static int
dpdk_port_crc_strip_enabled (dpdk_device_t * xd)
{
@@ -220,7 +166,6 @@ dpdk_lib_init (dpdk_main_t * dm)
{
u32 nports;
u32 mtu, max_rx_frame;
u32 nb_desc = 0;
int i;
clib_error_t *error;
vlib_main_t *vm = vlib_get_main ();
@@ -631,9 +576,6 @@ dpdk_lib_init (dpdk_main_t * dm)
dq->queue_id = 0;
}
/* count the number of descriptors used for this device */
nb_desc += xd->nb_rx_desc + xd->nb_tx_desc * xd->tx_q_used;
error = ethernet_register_interface
(dm->vnet_main, dpdk_device_class.index, xd->device_index,
/* ethernet address */ addr,
@@ -811,10 +753,6 @@ dpdk_lib_init (dpdk_main_t * dm)
}
/* *INDENT-ON* */
if (nb_desc > dm->conf->num_mbufs)
dpdk_log_err ("%d mbufs allocated but total rx/tx ring size is %d\n",
dm->conf->num_mbufs, nb_desc);
return 0;
}
@@ -1209,7 +1147,8 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input)
}
else if (unformat (input, "num-mem-channels %d", &conf->nchannels))
conf->nchannels_set_manually = 0;
else if (unformat (input, "num-mbufs %d", &conf->num_mbufs))
else if (unformat (input, "num-crypto-mbufs %d",
&conf->num_crypto_mbufs))
;
else if (unformat (input, "uio-driver %s", &conf->uio_driver_name))
;
@@ -1452,36 +1391,10 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input)
if (ret < 0)
return clib_error_return (0, "rte_eal_init returned %d", ret);
/* set custom ring memory allocator */
{
struct rte_mempool_ops *ops = NULL;
ops = get_ops_by_name ("ring_sp_sc");
ops->alloc = dpdk_ring_alloc;
ops = get_ops_by_name ("ring_mp_sc");
ops->alloc = dpdk_ring_alloc;
ops = get_ops_by_name ("ring_sp_mc");
ops->alloc = dpdk_ring_alloc;
ops = get_ops_by_name ("ring_mp_mc");
ops->alloc = dpdk_ring_alloc;
}
/* main thread 1st */
error = dpdk_buffer_pool_create (vm, conf->num_mbufs, rte_socket_id ());
if (error)
if ((error = dpdk_buffer_pools_create (vm)))
return error;
for (i = 0; i < RTE_MAX_LCORE; i++)
{
error = dpdk_buffer_pool_create (vm, conf->num_mbufs,
rte_lcore_to_socket_id (i));
if (error)
return error;
}
done:
return error;
}
@@ -1768,7 +1681,6 @@ dpdk_init (vlib_main_t * vm)
dm->conf = &dpdk_config_main;
dm->conf->nchannels = 4;
dm->conf->num_mbufs = dm->conf->num_mbufs ? dm->conf->num_mbufs : NB_MBUF;
vec_add1 (dm->conf->eal_init_args, (u8 *) "vnet");
vec_add1 (dm->conf->eal_init_args, (u8 *) "--in-memory");
+7 -5
View File
@@ -24,6 +24,7 @@
dpdk_crypto_main_t dpdk_crypto_main;
#define EMPTY_STRUCT {0}
#define NUM_CRYPTO_MBUFS 16384
static void
algos_init (u32 n_mains)
@@ -835,11 +836,12 @@ crypto_create_crypto_op_pool (vlib_main_t * vm, u8 numa)
pool_name = format (0, "crypto_pool_numa%u%c", numa, 0);
mp =
rte_mempool_create ((char *) pool_name,
conf->num_mbufs,
crypto_op_len (), 512, pool_priv_size, NULL, NULL,
crypto_op_init, NULL, numa, 0);
if (conf->num_crypto_mbufs == 0)
conf->num_crypto_mbufs = NUM_CRYPTO_MBUFS;
mp = rte_mempool_create ((char *) pool_name, conf->num_crypto_mbufs,
crypto_op_len (), 512, pool_priv_size, NULL, NULL,
crypto_op_init, NULL, numa, 0);
vec_free (pool_name);
+5
View File
@@ -185,6 +185,7 @@ memif_int_fd_read_ready (clib_file_t * uf)
clib_error_t *
memif_connect (memif_if_t * mif)
{
vlib_main_t *vm = vlib_get_main ();
vnet_main_t *vnm = vnet_get_main ();
clib_file_t template = { 0 };
memif_region_t *mr;
@@ -235,6 +236,7 @@ memif_connect (memif_if_t * mif)
vec_foreach_index (i, mif->rx_queues)
{
memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
u32 ti;
int rv;
mq->ring = mif->regions[mq->region].shm + mq->offset;
@@ -254,6 +256,9 @@ memif_connect (memif_if_t * mif)
memif_file_add (&mq->int_clib_file_index, &template);
}
vnet_hw_interface_assign_rx_thread (vnm, mif->hw_if_index, i, ~0);
ti = vnet_get_device_input_thread_index (vnm, mif->hw_if_index, i);
mq->buffer_pool_index =
vlib_buffer_pool_get_default_for_numa (vm, vlib_mains[ti]->numa_node);
rv = vnet_hw_interface_set_rx_mode (vnm, mif->hw_if_index, i,
VNET_HW_INTERFACE_RX_MODE_DEFAULT);
if (rv)
+6 -3
View File
@@ -280,7 +280,8 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
/* allocate free buffers */
vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
n_alloc = vlib_buffer_alloc (vm, ptd->buffers, n_buffers);
n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
mq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_buffers))
{
if (n_alloc)
@@ -343,6 +344,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
ptd->buffer_template.current_data = start_offset;
ptd->buffer_template.current_config_index = 0;
ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
{
@@ -783,8 +785,9 @@ refill:
clib_memset (dt, 0, sizeof (memif_desc_t));
dt->length = buffer_length;
n_alloc = vlib_buffer_alloc_to_ring (vm, mq->buffers, head & mask,
ring_size, n_slots);
n_alloc = vlib_buffer_alloc_to_ring_from_pool (vm, mq->buffers, head & mask,
ring_size, n_slots,
mq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_slots))
{
+1
View File
@@ -123,6 +123,7 @@ typedef struct
u16 last_head;
u16 last_tail;
u32 *buffers;
u8 buffer_pool_index;
/* interrupts */
int int_fd;
+221 -372
View File
File diff suppressed because it is too large Load Diff
+34 -88
View File
@@ -59,8 +59,6 @@
/* Amount of head buffer data copied to each replica head buffer */
#define VLIB_BUFFER_CLONE_HEAD_SIZE (256)
typedef u8 vlib_buffer_free_list_index_t;
/** \file
vlib buffer structure definition and a few select
access methods. This structure and the buffer allocation
@@ -127,8 +125,8 @@ typedef union
/** Generic flow identifier */
u32 flow_id;
/** Number of additional references to this buffer. */
u8 n_add_refs;
/** Reference count for this buffer. */
volatile u8 ref_count;
/** index of buffer pool this buffer belongs. */
u8 buffer_pool_index;
@@ -367,66 +365,32 @@ vlib_buffer_pull (vlib_buffer_t * b, u8 size)
/* Forward declaration. */
struct vlib_main_t;
typedef struct vlib_buffer_free_list_t
{
/* Template buffer used to initialize first 16 bytes of buffers
allocated on this free list. */
vlib_buffer_t buffer_init_template;
/* Our index into vlib_main_t's buffer_free_list_pool. */
vlib_buffer_free_list_index_t index;
/* Number of buffers to allocate when we need to allocate new buffers */
u32 min_n_buffers_each_alloc;
/* Total number of buffers allocated from this free list. */
u32 n_alloc;
/* Vector of free buffers. Each element is a byte offset into I/O heap. */
u32 *buffers;
/* index of buffer pool used to get / put buffers */
u8 buffer_pool_index;
/* Free list name. */
u8 *name;
/* Callback functions to initialize newly allocated buffers.
If null buffers are zeroed. */
void (*buffer_init_function) (struct vlib_main_t * vm,
struct vlib_buffer_free_list_t * fl,
u32 * buffers, u32 n_buffers);
uword buffer_init_function_opaque;
} __attribute__ ((aligned (16))) vlib_buffer_free_list_t;
typedef uword (vlib_buffer_fill_free_list_cb_t) (struct vlib_main_t * vm,
vlib_buffer_free_list_t * fl,
uword min_free_buffers);
typedef void (vlib_buffer_free_cb_t) (struct vlib_main_t * vm, u32 * buffers,
u32 n_buffers);
typedef void (vlib_buffer_free_no_next_cb_t) (struct vlib_main_t * vm,
u32 * buffers, u32 n_buffers);
typedef struct
{
vlib_buffer_fill_free_list_cb_t *vlib_buffer_fill_free_list_cb;
vlib_buffer_free_cb_t *vlib_buffer_free_cb;
vlib_buffer_free_no_next_cb_t *vlib_buffer_free_no_next_cb;
} vlib_buffer_callbacks_t;
extern vlib_buffer_callbacks_t *vlib_buffer_callbacks;
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 *cached_buffers;
u32 n_alloc;
} vlib_buffer_pool_thread_t;
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
uword start;
uword size;
uword log2_page_size;
u8 index;
u32 numa_node;
u32 physmem_map_index;
u32 buffer_size;
u32 data_size;
u32 n_buffers;
u32 *buffers;
u8 *name;
clib_spinlock_t lock;
/* per-thread data */
vlib_buffer_pool_thread_t *threads;
/* buffer metadata template */
vlib_buffer_t buffer_template;
} vlib_buffer_pool_t;
typedef struct
@@ -438,36 +402,24 @@ typedef struct
uword buffer_mem_size;
vlib_buffer_pool_t *buffer_pools;
/* Buffer free callback, for subversive activities */
u32 (*buffer_free_callback) (struct vlib_main_t * vm,
u32 * buffers,
u32 n_buffers, u32 follow_buffer_next);
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX (0)
/* Hash table mapping buffer size (rounded to next unit of
sizeof (vlib_buffer_t)) to free list index. */
uword *free_list_by_size;
/* Hash table mapping buffer index into number
0 => allocated but free, 1 => allocated and not-free.
If buffer index is not in hash table then this buffer
has never been allocated. */
uword *buffer_known_hash;
clib_spinlock_t buffer_known_hash_lockp;
u32 n_numa_nodes;
/* Callbacks */
vlib_buffer_callbacks_t cb;
int callbacks_registered;
/* config */
u32 buffers_per_numa;
u16 ext_hdr_size;
/* logging */
vlib_log_class_t log_default;
} vlib_buffer_main_t;
u8 vlib_buffer_register_physmem_map (struct vlib_main_t *vm,
u32 physmem_map_index);
clib_error_t *vlib_buffer_main_init (struct vlib_main_t *vm);
void *vlib_set_buffer_free_callback (struct vlib_main_t *vm, void *fp);
/*
*/
@@ -488,23 +440,17 @@ extern void vlib_buffer_trace_trajectory_init (vlib_buffer_t * b);
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
#endif /* VLIB_BUFFER_TRACE_TRAJECTORY */
#endif /* included_vlib_buffer_h */
extern u16 __vlib_buffer_external_hdr_size;
#define VLIB_BUFFER_SET_EXT_HDR_SIZE(x) \
static void __clib_constructor \
vnet_buffer_set_ext_hdr_size() \
{ \
if (__vlib_buffer_external_hdr_size) \
clib_error ("buffer external header space already set"); \
__vlib_buffer_external_hdr_size = CLIB_CACHE_LINE_ROUND (x); \
}
#define VLIB_BUFFER_REGISTER_CALLBACKS(x,...) \
__VA_ARGS__ vlib_buffer_callbacks_t __##x##_buffer_callbacks; \
static void __vlib_add_buffer_callbacks_t_##x (void) \
__attribute__((__constructor__)) ; \
static void __vlib_add_buffer_callbacks_t_##x (void) \
{ \
if (vlib_buffer_callbacks) \
clib_panic ("vlib buffer callbacks already registered"); \
vlib_buffer_callbacks = &__##x##_buffer_callbacks; \
} \
static void __vlib_rm_buffer_callbacks_t_##x (void) \
__attribute__((__destructor__)) ; \
static void __vlib_rm_buffer_callbacks_t_##x (void) \
{ vlib_buffer_callbacks = 0; } \
__VA_ARGS__ vlib_buffer_callbacks_t __##x##_buffer_callbacks
#endif /* included_vlib_buffer_h */
/*
* fd.io coding-style-patch-verification: ON
+331 -189
View File
File diff suppressed because it is too large Load Diff
-2
View File
@@ -18,8 +18,6 @@
#include <vppinfra/types.h>
typedef u32 vlib_log_class_t;
#define foreach_vlib_log_level \
_(0, EMERG, emerg) \
_(1, ALERT, alert) \
+3 -6
View File
@@ -463,7 +463,7 @@ vlib_put_next_frame (vlib_main_t * vm,
vlib_frame_t *f;
u32 n_vectors_in_frame;
if (vm->buffer_main->callbacks_registered == 0 && CLIB_DEBUG > 0)
if (CLIB_DEBUG > 0)
vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);
nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
@@ -987,8 +987,8 @@ format_buffer_metadata (u8 * s, va_list * args)
(i32) (b->current_data), (i32) (b->current_length));
s = format (s, "current_config_index: %d, flow_id: %x, next_buffer: %x\n",
b->current_config_index, b->flow_id, b->next_buffer);
s = format (s, "error: %d, n_add_refs: %d, buffer_pool_index: %d\n",
(u32) (b->error), (u32) (b->n_add_refs),
s = format (s, "error: %d, ref_count: %d, buffer_pool_index: %d\n",
(u32) (b->error), (u32) (b->ref_count),
(u32) (b->buffer_pool_index));
s = format (s,
"trace_index: %d, len_not_first_buf: %d\n",
@@ -1993,9 +1993,6 @@ vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
if ((error = vlib_call_all_init_functions (vm)))
goto done;
/* Create default buffer free list. */
vlib_buffer_create_free_list (vm, VLIB_BUFFER_DATA_SIZE, "default");
nm->timing_wheel = clib_mem_alloc_aligned (sizeof (TWT (tw_timer_wheel)),
CLIB_CACHE_LINE_BYTES);
-3
View File
@@ -115,9 +115,6 @@ typedef struct vlib_main_t
/* Size of the heap */
uword heap_size;
/* Pool of buffer free lists. */
vlib_buffer_free_list_t *buffer_free_list_pool;
/* buffer main structure. */
vlib_buffer_main_t *buffer_main;
-22
View File
@@ -719,8 +719,6 @@ start_workers (vlib_main_t * vm)
for (i = 0; i < vec_len (tm->registrations); i++)
{
vlib_node_main_t *nm, *nm_clone;
vlib_buffer_free_list_t *fl_clone, *fl_orig;
vlib_buffer_free_list_t *orig_freelist_pool;
int k;
tr = tm->registrations[i];
@@ -883,26 +881,6 @@ start_workers (vlib_main_t * vm)
(vlib_mains[0]->error_main.counters_last_clear,
CLIB_CACHE_LINE_BYTES);
/* Fork the vlib_buffer_main_t free lists, etc. */
orig_freelist_pool = vm_clone->buffer_free_list_pool;
vm_clone->buffer_free_list_pool = 0;
/* *INDENT-OFF* */
pool_foreach (fl_orig, orig_freelist_pool,
({
pool_get_aligned (vm_clone->buffer_free_list_pool,
fl_clone, CLIB_CACHE_LINE_BYTES);
ASSERT (fl_orig - orig_freelist_pool
== fl_clone - vm_clone->buffer_free_list_pool);
fl_clone[0] = fl_orig[0];
fl_clone->buffers = 0;
vec_validate(fl_clone->buffers, 0);
vec_reset_length(fl_clone->buffers);
fl_clone->n_alloc = 0;
}));
/* *INDENT-ON* */
worker_thread_index++;
}
}
+1
View File
@@ -48,6 +48,7 @@
/* Forward declarations of structs to avoid circular dependencies. */
struct vlib_main_t;
typedef u32 vlib_log_class_t;
/* All includes in alphabetical order. */
#include <vlib/physmem.h>
+7 -5
View File
@@ -69,6 +69,13 @@ cpu {
# scheduler-priority 50
}
# buffers {
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per numa node.
## Default is 16384 (8192 if running unprivileged)
# buffers-per-numa 128000
# }
# dpdk {
## Change default settings for all interfaces
# dev default {
@@ -127,11 +134,6 @@ cpu {
## disables Jumbo MTU support
# no-multi-seg
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per CPU socket.
## Default is 16384
# num-mbufs 128000
## Change hugepages allocation per-socket, needed only if there is need for
## larger number of mbufs. Default is 256M on each detected CPU socket
# socket-mem 2048,2048
+56
View File
@@ -62,6 +62,62 @@
#define CLIB_MARCH_SFX CLIB_MULTIARCH_FN
typedef struct _clib_march_fn_registration
{
void *function;
int priority;
struct _clib_march_fn_registration *next;
char *name;
} clib_march_fn_registration;
static_always_inline void *
clib_march_select_fn_ptr (clib_march_fn_registration * r)
{
void *rv = 0;
int last_prio = -1;
while (r)
{
if (last_prio < r->priority)
{
last_prio = r->priority;
rv = r->function;
}
r = r->next;
}
return rv;
}
#define CLIB_MARCH_FN_POINTER(fn) \
clib_march_select_fn_ptr (fn##_march_fn_registrations);
#define _CLIB_MARCH_FN_REGISTRATION(fn) \
static clib_march_fn_registration \
CLIB_MARCH_SFX(fn##_march_fn_registration) = \
{ \
.name = CLIB_MARCH_VARIANT_STR \
}; \
\
static void __clib_constructor \
fn##_march_register () \
{ \
clib_march_fn_registration *r; \
r = & CLIB_MARCH_SFX (fn##_march_fn_registration); \
r->priority = CLIB_MARCH_FN_PRIORITY(); \
r->next = fn##_march_fn_registrations; \
r->function = CLIB_MARCH_SFX (fn); \
fn##_march_fn_registrations = r; \
}
#ifdef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN_REGISTRATION(fn) \
extern clib_march_fn_registration *fn##_march_fn_registrations; \
_CLIB_MARCH_FN_REGISTRATION(fn)
#else
#define CLIB_MARCH_FN_REGISTRATION(fn) \
clib_march_fn_registration *fn##_march_fn_registrations = 0; \
_CLIB_MARCH_FN_REGISTRATION(fn)
#endif
#define foreach_x86_64_flags \
_ (sse3, 1, ecx, 0) \
_ (ssse3, 1, ecx, 9) \
+4
View File
@@ -91,6 +91,10 @@ do { \
#define STATIC_ASSERT_OFFSET_OF(s, e, o) \
STATIC_ASSERT (STRUCT_OFFSET_OF(s,e) == o, "Offset of " #s "." #e " must be " # o)
#define STATIC_ASSERT_FITS_IN(s, e, o) \
STATIC_ASSERT (STRUCT_OFFSET_OF(s,e) <= (o - sizeof(((s *)0)->e)), \
#s "." #e " does not fit into " # o " bytes")
/* Assert without allocating memory. */
#define ASSERT_AND_PANIC(truth) \
do { \