Add vlib_buffer_copy_indices inline function
This reverts commit 1e59f9ddbdda14591967e1d66eab8623f9ba58e4.

Change-Id: Iae1d372b887e170d28cac2fe4c61325ee5a5894a
Signed-off-by: Damjan Marion <damarion@cisco.com>
Committed by Florin Coras
Parent 847d528825
Commit 64d557cd67
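For readers skimming the diff below: the commit adds vlib_buffer_copy_indices as a thin static inline wrapper over clib_memcpy_fast and converts the open-coded buffer-index copies in the drivers and buffer nodes to use it, so call sites state a count of u32 indices rather than a byte size and the copy can later be tuned in one place. As a rough illustration only, a minimal, self-contained sketch of the pattern might look like the following (it substitutes plain memcpy and stand-in arrays for VPP's clib_memcpy_fast and the real rx/tx rings; ring, buffers and main are made up for the example):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef uint32_t u32;

/* Mirrors the wrapper added in this commit: copy n_indices 32-bit buffer
   indices from src to dst.  VPP itself forwards to clib_memcpy_fast here;
   plain memcpy is used only to keep this sketch standalone. */
static inline void
vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
{
  memcpy (dst, src, n_indices * sizeof (u32));
}

int
main (void)
{
  u32 ring[8] = { 0 };                 /* stand-in for e.g. txq->bufs */
  u32 buffers[4] = { 10, 11, 12, 13 }; /* stand-in for a frame's indices */

  /* Call sites now say how many indices they copy, not how many bytes. */
  vlib_buffer_copy_indices (ring, buffers, 4);

  for (int i = 0; i < 4; i++)
    printf ("%u\n", ring[i]);
  return 0;
}

In the tree itself the wrapper is placed next to vlib_get_buffer (see the buffer_funcs hunk below) and currently just forwards to clib_memcpy_fast.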
@@ -295,7 +295,7 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       or_q1x4 |= q1x4;
       u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
-      clib_memcpy_fast (bi, rxq->bufs + next, 4 * sizeof (u32));
+      vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);
 
       /* next */
       next = (next + 4) & mask;
@@ -66,7 +66,7 @@ avf_tx_enqueue (vlib_main_t * vm, avf_txq_t * txq, u32 * buffers,
       if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
 	goto one_by_one;
 
-      clib_memcpy_fast (txq->bufs + next, buffers, sizeof (u32) * 4);
+      vlib_buffer_copy_indices (txq->bufs + next, buffers, 4);
 
       if (use_va_dma)
 	{
@@ -101,7 +101,7 @@ mrvl_pp2_interface_tx (vlib_main_t * vm,
   buffers = vlib_frame_vector_args (frame);
   u16 n_copy = clib_min (outq->size - slot, n_sent);
 
-  clib_memcpy_fast (outq->buffers + slot, buffers, n_copy * sizeof (u32));
+  vlib_buffer_copy_indices (outq->buffers + slot, buffers, n_copy);
   if (PREDICT_FALSE (n_copy < n_sent))
     clib_memcpy_fast (outq->buffers, buffers + n_copy,
		       (n_sent - n_copy) * sizeof (u32));
@@ -64,6 +64,12 @@ vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
   return uword_to_pointer (bm->buffer_mem_start + offset, void *);
 }
 
+static_always_inline void
+vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
+{
+  clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
+}
+
 static_always_inline void
 vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
 {
@@ -454,7 +460,7 @@ vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
       /* following code is intentionaly duplicated to allow compiler
          to optimize fast path when n_buffers is constant value */
       src = fl->buffers + len - n_buffers;
-      clib_memcpy_fast (buffers, src, n_buffers * sizeof (u32));
+      vlib_buffer_copy_indices (buffers, src, n_buffers);
       _vec_len (fl->buffers) -= n_buffers;
 
       /* Verify that buffers are known free. */
@@ -465,7 +471,7 @@ vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
     }
 
   src = fl->buffers + len - n_buffers;
-  clib_memcpy_fast (buffers, src, n_buffers * sizeof (u32));
+  vlib_buffer_copy_indices (buffers, src, n_buffers);
   _vec_len (fl->buffers) -= n_buffers;
 
   /* Verify that buffers are known free. */
@@ -383,7 +383,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC512
       if (n_enqueued >= 32)
 	{
-	  clib_memcpy_fast (to_next, buffers, 32 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 32);
 	  nexts += 32;
 	  to_next += 32;
 	  buffers += 32;
@@ -397,7 +397,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC256
       if (n_enqueued >= 16)
 	{
-	  clib_memcpy_fast (to_next, buffers, 16 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 16);
 	  nexts += 16;
 	  to_next += 16;
 	  buffers += 16;
@@ -411,7 +411,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC128
       if (n_enqueued >= 8)
 	{
-	  clib_memcpy_fast (to_next, buffers, 8 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 8);
 	  nexts += 8;
 	  to_next += 8;
 	  buffers += 8;
@@ -424,7 +424,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       if (n_enqueued >= 4)
 	{
-	  clib_memcpy_fast (to_next, buffers, 4 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 4);
 	  nexts += 4;
 	  to_next += 4;
 	  buffers += 4;
@@ -459,7 +459,7 @@ vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
 
   if (PREDICT_TRUE (n_left_to_next >= count))
     {
-      clib_memcpy_fast (to_next, buffers, count * sizeof (u32));
+      vlib_buffer_copy_indices (to_next, buffers, count);
       n_left_to_next -= count;
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
       return;
@@ -467,7 +467,7 @@ vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
 
   n_enq = n_left_to_next;
 next:
-  clib_memcpy_fast (to_next, buffers, n_enq * sizeof (u32));
+  vlib_buffer_copy_indices (to_next, buffers, n_enq);
   n_left_to_next -= n_enq;
 
   if (PREDICT_FALSE (count > n_enq))
@@ -1590,13 +1590,12 @@ pg_generate_packets (vlib_node_runtime_t * node,
 	  head = clib_fifo_head (bi0->buffer_fifo);
 
 	  if (head + n_this_frame <= end)
-	    clib_memcpy_fast (to_next, head, n_this_frame * sizeof (u32));
+	    vlib_buffer_copy_indices (to_next, head, n_this_frame);
 	  else
 	    {
 	      u32 n = end - head;
-	      clib_memcpy_fast (to_next + 0, head, n * sizeof (u32));
-	      clib_memcpy_fast (to_next + n, start,
-				(n_this_frame - n) * sizeof (u32));
+	      vlib_buffer_copy_indices (to_next + 0, head, n);
+	      vlib_buffer_copy_indices (to_next + n, start, n_this_frame - n);
 	    }
 
 	  if (s->replay_packet_templates == 0)