svm: refactor fifo

Type: refactor

Switch from a wrapped byte space to a "continuous" one wherein chunks
are appended to the fifo as more data is enqueued and removed as data
is dequeued.

The fifo is still subject to a maximum size, i.e., a maximum number of
bytes that can be enqueued, so the maximum number of chunks associated
with the fifo is also constrained.

When enqueueing data, which must fit within the available free space,
if not enough "supporting" chunk memory is available, the fifo asks the
fifo segment for enough chunk memory to ensure that the write succeeds.
To avoid allocating large numbers of small chunks for small writes, the
size of the chunks requested is, if possible, capped from below by
min_alloc.

When dequeuing data, all chunks that have been completely drained,
i.e., the head has moved beyond the chunks' end bytes, are unlinked
from the fifo and returned to the fifo segment. The one exception is
the last chunk, which is never unlinked.
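
To make the enqueue rule concrete, here is a toy, self-contained
sketch of the sizing decision (names are hypothetical, not the actual
svm_fifo.c code; the dequeue-side unlinking is sketched further below,
next to fsh_collect_chunks):

#include <stdint.h>

/* How much chunk memory to request from the fifo segment for a write
 * of write_len bytes; zero means no allocation is needed (or the
 * write does not fit and is rejected by the caller). */
static uint32_t
chunk_bytes_to_request (uint32_t write_len, uint32_t chunk_space,
                        uint32_t free_space, uint32_t min_alloc)
{
  uint32_t needed;

  if (write_len > free_space)
    return 0;                   /* must fit within available free space */
  if (write_len <= chunk_space)
    return 0;                   /* existing chunk memory suffices */
  needed = write_len - chunk_space;
  /* lower cap by min_alloc, if possible, so small writes do not
   * produce lots of small chunks */
  if (needed < min_alloc && min_alloc <= free_space)
    needed = min_alloc;
  return needed;
}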

Change-Id: I98c1dbd9135fb79650365c7e40c29238b96cd4ee
Signed-off-by: Florin Coras <fcoras@cisco.com>
Author:    Florin Coras <fcoras@cisco.com>
Date:      2019-12-19 16:10:58 -08:00
Committer: Dave Barach
Commit:    f22f4e562e (parent b020806806)

17 changed files with 1792 additions and 1987 deletions

@@ -274,7 +274,7 @@ proxy_tx_callback (session_t * proxy_s)
u32 min_free;
uword *p;
min_free = clib_min (proxy_s->tx_fifo->nitems >> 3, 128 << 10);
min_free = clib_min (svm_fifo_size (proxy_s->tx_fifo) >> 3, 128 << 10);
if (svm_fifo_max_enqueue (proxy_s->tx_fifo) < min_free)
{
svm_fifo_add_want_deq_ntf (proxy_s->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
@@ -356,6 +356,9 @@ active_open_connected_callback (u32 app_index, u32 opaque,
s->tx_fifo->refcnt++;
s->rx_fifo->refcnt++;
svm_fifo_init_ooo_lookup (s->tx_fifo, 1 /* deq ooo */ );
svm_fifo_init_ooo_lookup (s->rx_fifo, 0 /* enq ooo */ );
hash_set (pm->proxy_session_by_active_open_handle,
ps->vpp_active_open_handle, opaque);
@@ -425,7 +428,7 @@ active_open_tx_callback (session_t * ao_s)
u32 min_free;
uword *p;
min_free = clib_min (ao_s->tx_fifo->nitems >> 3, 128 << 10);
min_free = clib_min (svm_fifo_size (ao_s->tx_fifo) >> 3, 128 << 10);
if (svm_fifo_max_enqueue (ao_s->tx_fifo) < min_free)
{
svm_fifo_add_want_deq_ntf (ao_s->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);

@@ -916,10 +916,10 @@ int
quic_fifo_egress_emit (quicly_stream_t * stream, size_t off, void *dst,
size_t * len, int *wrote_all)
{
u32 deq_max, first_deq, max_rd_chunk, rem_offset;
quic_stream_data_t *stream_data;
session_t *stream_session;
svm_fifo_t *f;
u32 deq_max;
stream_data = (quic_stream_data_t *) stream->data;
stream_session = get_stream_session_from_stream (stream);
@@ -943,22 +943,7 @@ quic_fifo_egress_emit (quicly_stream_t * stream, size_t off, void *dst,
if (off + *len > stream_data->app_tx_data_len)
stream_data->app_tx_data_len = off + *len;
/* TODO, use something like : return svm_fifo_peek (f, off, *len, dst); */
max_rd_chunk = svm_fifo_max_read_chunk (f);
first_deq = 0;
if (off < max_rd_chunk)
{
first_deq = clib_min (*len, max_rd_chunk - off);
clib_memcpy_fast (dst, svm_fifo_head (f) + off, first_deq);
}
if (max_rd_chunk < off + *len)
{
rem_offset = max_rd_chunk < off ? off - max_rd_chunk : 0;
clib_memcpy_fast (dst + first_deq, f->head_chunk->data + rem_offset,
*len - first_deq);
}
svm_fifo_peek (f, off, *len, dst);
return 0;
}
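
The removed open-coded copy could bridge at most one boundary, whereas
svm_fifo_peek walks the chunk list itself. A rough model of what a
chunk-walking peek does (illustrative; the real implementation is in
the suppressed svm_fifo.c diff):

#include <stdint.h>
#include <string.h>

typedef struct model_chunk_
{
  uint32_t start_byte, length;  /* cumulative start byte and size */
  struct model_chunk_ *next;
  uint8_t data[];
} model_chunk_t;

/* Copy len bytes starting offset bytes past head, crossing chunk
 * boundaries as needed, without moving head. Assumes the requested
 * range is fully enqueued. */
static void
model_peek (model_chunk_t * c, uint32_t head, uint32_t offset,
            uint32_t len, uint8_t * dst)
{
  uint32_t pos = head + offset;

  while (pos >= c->start_byte + c->length)      /* find starting chunk */
    c = c->next;
  while (len)
    {
      uint32_t avail = c->start_byte + c->length - pos;
      uint32_t n = len < avail ? len : avail;
      memcpy (dst, c->data + (pos - c->start_byte), n);
      dst += n;
      pos += n;
      len -= n;
      if (len)
        c = c->next;
    }
}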

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -28,6 +28,7 @@ add_vpp_library(svm
INSTALL_HEADERS
fifo_segment.h
fifo_types.h
message_queue.h
queue.h
ssvm.h

@@ -127,6 +127,7 @@ fifo_segment_init (fifo_segment_t * fs)
{
fss = fsh_slice_get (fsh, i);
vec_validate_init_empty (fss->free_chunks, max_chunk_sz, 0);
clib_spinlock_init (&fss->chunk_lock);
}
ssvm_pop_heap (oldheap);
@@ -243,6 +244,8 @@ fifo_segment_main_init (fifo_segment_main_t * sm, u64 baseva,
static inline u32
fs_freelist_for_size (u32 size)
{
if (PREDICT_FALSE (size < FIFO_SEGMENT_MIN_FIFO_SIZE))
return 0;
return max_log2 (size) - FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE;
}
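
The added guard matters because, for sizes below the minimum fifo
size, max_log2 (size) is smaller than FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE
and the unsigned subtraction would wrap to a huge freelist index. A
standalone restatement of the math, assuming the customary 4 kB (2^12)
minimum:

#include <stdint.h>

#define MIN_LOG2_FIFO_SIZE 12   /* assumed 4 kB minimum fifo size */

static uint32_t
ceil_log2 (uint32_t x)          /* behaves like vppinfra max_log2, x > 0 */
{
  uint32_t l = 31 - __builtin_clz (x);
  return ((1u << l) < x) ? l + 1 : l;
}

static uint32_t
freelist_for_size (uint32_t size)
{
  if (size < (1u << MIN_LOG2_FIFO_SIZE))
    return 0;                   /* undersized requests map to class 0 */
  return ceil_log2 (size) - MIN_LOG2_FIFO_SIZE;
}

/* freelist_for_size: 4096 -> 0, 5000 -> 1, 8192 -> 1, 16384 -> 2 */
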
@@ -278,9 +281,8 @@ fs_try_alloc_fifo_freelist (fifo_segment_slice_t * fss,
fss->free_fifos = f->next;
fss->free_chunks[fl_index] = c->next;
c->next = c;
c->next = 0;
c->start_byte = 0;
c->length = data_bytes;
memset (f, 0, sizeof (*f));
f->start_chunk = c;
f->end_chunk = c;
@@ -331,7 +333,6 @@ fs_try_alloc_fifo_freelist_multi_chunk (fifo_segment_header_t * fsh,
c->next = first;
first = c;
n_alloc += fl_size;
c->length = clib_min (fl_size, data_bytes);
data_bytes -= c->length;
}
else
@@ -370,7 +371,6 @@ fs_try_alloc_fifo_freelist_multi_chunk (fifo_segment_header_t * fsh,
f->start_chunk = first;
f->end_chunk = last;
last->next = first;
fss->n_fl_chunk_bytes -= n_alloc;
return f;
}
@@ -412,7 +412,8 @@ fs_try_alloc_fifo_batch (fifo_segment_header_t * fsh,
c = (svm_fifo_chunk_t *) (fmem + sizeof (*f));
c->start_byte = 0;
c->length = rounded_data_size;
c->rb_index = RBTREE_TNIL_INDEX;
c->enq_rb_index = RBTREE_TNIL_INDEX;
c->deq_rb_index = RBTREE_TNIL_INDEX;
c->next = fss->free_chunks[fl_index];
fss->free_chunks[fl_index] = c;
fmem += hdrs + rounded_data_size;
@@ -466,7 +467,7 @@ fs_try_alloc_fifo (fifo_segment_header_t * fsh, fifo_segment_slice_t * fss,
if (fifo_sz <= n_free_bytes)
{
void *oldheap = ssvm_push_heap (fsh->ssvm_sh);
f = svm_fifo_create (data_bytes);
f = svm_fifo_alloc (data_bytes);
ssvm_pop_heap (oldheap);
if (f)
{
@@ -479,9 +480,87 @@
done:
if (f)
f->fs_hdr = fsh;
return f;
}
svm_fifo_chunk_t *
fsh_alloc_chunk (fifo_segment_header_t * fsh, u32 slice_index, u32 chunk_size)
{
fifo_segment_slice_t *fss;
svm_fifo_chunk_t *c;
void *oldheap;
int fl_index;
fl_index = fs_freelist_for_size (chunk_size);
fss = fsh_slice_get (fsh, slice_index);
clib_spinlock_lock (&fss->chunk_lock);
c = fss->free_chunks[fl_index];
if (!c)
{
fsh_check_mem (fsh);
chunk_size = fs_freelist_index_to_size (fl_index);
if (fsh_n_free_bytes (fsh) < chunk_size)
goto done;
oldheap = ssvm_push_heap (fsh->ssvm_sh);
c = svm_fifo_chunk_alloc (chunk_size);
ssvm_pop_heap (oldheap);
if (!c)
goto done;
fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c));
}
else
{
fss->free_chunks[fl_index] = c->next;
c->next = 0;
fss->n_fl_chunk_bytes -= fs_freelist_index_to_size (fl_index);
}
done:
clib_spinlock_unlock (&fss->chunk_lock);
return c;
}
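
A hedged sketch of how a producer can use fsh_alloc_chunk when a write
does not fit in the current chunk list (the fields come from the new
fifo_types.h below; the actual growth logic lives in the suppressed
svm_fifo.c diff and may differ):

/* Grow the chunk list so a write of len more bytes can succeed;
 * request at least f->min_alloc to avoid many tiny chunks. */
static int
fifo_try_grow (svm_fifo_t * f, u32 len)
{
  svm_fifo_chunk_t *c;

  c = fsh_alloc_chunk (f->fs_hdr, f->slice_index,
                       clib_max (len, f->min_alloc));
  if (!c)
    return -1;                  /* segment out of chunk memory */

  /* chunks carry cumulative start bytes in the continuous space */
  c->start_byte = f->end_chunk->start_byte + f->end_chunk->length;
  f->end_chunk->next = c;
  f->end_chunk = c;
  return 0;
}
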
static void
fsh_slice_collect_chunks (fifo_segment_slice_t * fss, svm_fifo_chunk_t * cur)
{
svm_fifo_chunk_t *next;
int fl_index;
clib_spinlock_lock (&fss->chunk_lock);
while (cur)
{
next = cur->next;
fl_index = fs_freelist_for_size (cur->length);
cur->next = fss->free_chunks[fl_index];
cur->enq_rb_index = RBTREE_TNIL_INDEX;
cur->deq_rb_index = RBTREE_TNIL_INDEX;
fss->free_chunks[fl_index] = cur;
fss->n_fl_chunk_bytes += fs_freelist_index_to_size (fl_index);
cur = next;
}
clib_spinlock_unlock (&fss->chunk_lock);
}
void
fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index,
svm_fifo_chunk_t * cur)
{
fifo_segment_slice_t *fss;
fss = fsh_slice_get (fsh, slice_index);
fsh_slice_collect_chunks (fss, cur);
}
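
The dequeue-side counterpart, matching the unlinking described in the
commit message: drained chunks are chained locally and returned in one
call, so chunk_lock is taken once per collection rather than once per
chunk. A sketch (the walk itself is illustrative):

static void
fifo_collect_drained (svm_fifo_t * f)
{
  svm_fifo_chunk_t *c = f->start_chunk, *drained = 0;

  /* a chunk is drained once head moved past its end byte; the last
   * chunk (c->next == 0) is never unlinked */
  while (c->next && f->head >= c->start_byte + c->length)
    {
      svm_fifo_chunk_t *next = c->next;
      c->next = drained;
      drained = c;
      c = next;
    }
  f->start_chunk = f->head_chunk = c;
  if (drained)
    fsh_collect_chunks (f->fs_hdr, f->slice_index, drained);
}
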
/**
* Allocate fifo in fifo segment
*/
@@ -502,13 +581,8 @@ fifo_segment_alloc_fifo_w_slice (fifo_segment_t * fs, u32 slice_index,
f->slice_index = slice_index;
/* (re)initialize the fifo, as in svm_fifo_create */
svm_fifo_init (f, data_bytes);
/* Initialize chunks and rbtree for multi-chunk fifos */
if (f->start_chunk->next != f->start_chunk)
svm_fifo_init_chunks (f);
/* If rx fifo type add to active fifos list. When cleaning up segment,
* we need a list of active sessions that should be disconnected. Since
* both rx and tx fifos keep pointers to the session, it's enough to track
@@ -522,7 +596,14 @@ fifo_segment_alloc_fifo_w_slice (fifo_segment_t * fs, u32 slice_index,
}
fss->fifos = f;
f->flags |= SVM_FIFO_F_LL_TRACKED;
svm_fifo_init_ooo_lookup (f, 0 /* ooo enq */ );
}
else
{
svm_fifo_init_ooo_lookup (f, 1 /* ooo deq */ );
}
fsh_active_fifos_update (fsh, 1);
done:
@@ -536,9 +617,7 @@ void
fifo_segment_free_fifo (fifo_segment_t * fs, svm_fifo_t * f)
{
fifo_segment_header_t *fsh = fs->h;
svm_fifo_chunk_t *cur, *next;
fifo_segment_slice_t *fss;
int fl_index;
ASSERT (f->refcnt > 0);
@@ -565,26 +644,13 @@ fifo_segment_free_fifo (fifo_segment_t * fs, svm_fifo_t * f)
fss->free_fifos = f;
/* Free fifo chunks */
cur = f->start_chunk;
do
{
next = cur->next;
fl_index = fs_freelist_for_size (cur->length);
ASSERT (fl_index < vec_len (fss->free_chunks));
cur->next = fss->free_chunks[fl_index];
cur->rb_index = RBTREE_TNIL_INDEX;
fss->free_chunks[fl_index] = cur;
fss->n_fl_chunk_bytes += fs_freelist_index_to_size (fl_index);
cur = next;
}
while (cur != f->start_chunk);
fsh_slice_collect_chunks (fss, f->start_chunk);
f->start_chunk = f->end_chunk = f->new_chunks = 0;
f->start_chunk = f->end_chunk = 0;
f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = 0;
svm_fifo_free_chunk_lookup (f);
/* not allocated on segment heap */
svm_fifo_free_chunk_lookup (f);
svm_fifo_free_ooo_data (f);
if (CLIB_DEBUG)
@@ -751,71 +817,6 @@ fifo_segment_preallocate_fifo_pairs (fifo_segment_t * fs,
}
}
int
fifo_segment_grow_fifo (fifo_segment_t * fs, svm_fifo_t * f, u32 chunk_size)
{
fifo_segment_header_t *fsh = fs->h;
fifo_segment_slice_t *fss;
svm_fifo_chunk_t *c;
void *oldheap;
int fl_index;
fl_index = fs_freelist_for_size (chunk_size);
fss = fsh_slice_get (fsh, f->slice_index);
c = fss->free_chunks[fl_index];
if (!c)
{
fsh_check_mem (fsh);
if (fsh_n_free_bytes (fsh) < chunk_size)
return -1;
oldheap = ssvm_push_heap (fsh->ssvm_sh);
c = svm_fifo_chunk_alloc (chunk_size);
ssvm_pop_heap (oldheap);
if (!c)
return -1;
fsh_free_bytes_sub (fsh, chunk_size + sizeof (*c));
}
else
{
fss->free_chunks[fl_index] = c->next;
c->next = 0;
fss->n_fl_chunk_bytes -= fs_freelist_index_to_size (fl_index);
}
svm_fifo_add_chunk (f, c);
return 0;
}
int
fifo_segment_collect_fifo_chunks (fifo_segment_t * fs, svm_fifo_t * f)
{
fifo_segment_header_t *fsh = fs->h;
svm_fifo_chunk_t *cur, *next;
fifo_segment_slice_t *fss;
int fl_index;
cur = svm_fifo_collect_chunks (f);
fss = fsh_slice_get (fsh, f->slice_index);
while (cur)
{
next = cur->next;
fl_index = fs_freelist_for_size (cur->length);
cur->next = fss->free_chunks[fl_index];
fss->free_chunks[fl_index] = cur;
cur = next;
}
return 0;
}
/**
* Get number of active fifos
*/

@@ -16,6 +16,7 @@
#define __included_fifo_segment_h__
#include <svm/ssvm.h>
#include <svm/fifo_types.h>
#include <svm/svm_fifo.h>
typedef enum
@@ -38,26 +39,6 @@ typedef enum fifo_segment_flags_
FIFO_SEGMENT_F_MEM_LIMIT = 1 << 2,
} fifo_segment_flags_t;
typedef struct fifo_segment_slice_
{
svm_fifo_t *fifos; /**< Linked list of active RX fifos */
svm_fifo_t *free_fifos; /**< Freelists by fifo size */
svm_fifo_chunk_t **free_chunks; /**< Freelists by chunk size */
uword n_fl_chunk_bytes; /**< Chunk bytes on freelist */
} fifo_segment_slice_t;
typedef struct
{
fifo_segment_slice_t *slices; /** Fixed array of slices */
ssvm_shared_header_t *ssvm_sh; /**< Pointer to fs ssvm shared hdr */
uword n_free_bytes; /**< Segment free bytes */
u32 n_active_fifos; /**< Number of active fifos */
u32 n_reserved_bytes; /**< Bytes not to be allocated */
u32 max_log2_chunk_size; /**< Max log2(chunk size) for fs */
u8 flags; /**< Segment flags */
u8 n_slices; /**< Number of slices */
} fifo_segment_header_t;
typedef struct
{
ssvm_private_t ssvm; /**< ssvm segment data */
@@ -158,25 +139,12 @@ void fifo_segment_preallocate_fifo_pairs (fifo_segment_t * fs,
u32 rx_fifo_size,
u32 tx_fifo_size,
u32 * n_fifo_pairs);
/**
* Grow fifo size by adding an additional chunk of memory
*
* @param fs fifo segment for fifo
* @param f fifo to be grown
* @param chunk_size number of bytes to be added to fifo
* @return 0 on success or a negative number otherwise
*/
int fifo_segment_grow_fifo (fifo_segment_t * fs, svm_fifo_t * f,
u32 chunk_size);
/**
* Collect unused chunks for fifo
*
* @param fs fifo segment for fifo
* @param f fifo whose chunks are to be collected
* @return 0 on success, error otherwise
*/
int fifo_segment_collect_fifo_chunks (fifo_segment_t * fs, svm_fifo_t * f);
svm_fifo_chunk_t *fsh_alloc_chunk (fifo_segment_header_t * fsh,
u32 slice_index, u32 chunk_size);
void fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index,
svm_fifo_chunk_t * cur);
/**
* Fifo segment estimate of number of free bytes

src/svm/fifo_types.h (new file, 130 lines)

@@ -0,0 +1,130 @@
/*
* Copyright (c) 2020 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SRC_SVM_FIFO_TYPES_H_
#define SRC_SVM_FIFO_TYPES_H_
#include <svm/ssvm.h>
#include <vppinfra/clib.h>
#include <vppinfra/rbtree.h>
#define SVM_FIFO_TRACE (0)
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS 7
typedef struct fifo_segment_header_ fifo_segment_header_t;
typedef struct svm_fifo_chunk_
{
u32 start_byte; /**< chunk start byte */
u32 length; /**< length of chunk in bytes */
struct svm_fifo_chunk_ *next; /**< pointer to next chunk in linked-lists */
rb_node_index_t enq_rb_index; /**< enq node index if chunk in rbtree */
rb_node_index_t deq_rb_index; /**< deq node index if chunk in rbtree */
u8 data[0]; /**< start of chunk data */
} svm_fifo_chunk_t;
typedef struct
{
u32 next; /**< Next linked-list element pool index */
u32 prev; /**< Previous linked-list element pool index */
u32 start; /**< Start of segment, normalized*/
u32 length; /**< Length of segment */
} ooo_segment_t;
typedef struct
{
u32 offset;
u32 len;
u32 action;
} svm_fifo_trace_elem_t;
typedef struct _svm_fifo
{
CLIB_CACHE_LINE_ALIGN_MARK (shared_first);
fifo_segment_header_t *fs_hdr;/**< fifo segment header for fifo */
svm_fifo_chunk_t *start_chunk;/**< first chunk in fifo chunk list */
svm_fifo_chunk_t *end_chunk; /**< end chunk in fifo chunk list */
u32 min_alloc; /**< min chunk alloc if space available */
u32 size; /**< size of the fifo in bytes */
u8 flags; /**< fifo flags */
u8 slice_index; /**< segment slice for fifo */
CLIB_CACHE_LINE_ALIGN_MARK (shared_second);
volatile u32 has_event; /**< non-zero if deq event exists */
u32 master_session_index; /**< session layer session index */
u32 client_session_index; /**< app session index */
u8 master_thread_index; /**< session layer thread index */
u8 client_thread_index; /**< app worker index */
i8 refcnt; /**< reference count */
u32 segment_manager; /**< session layer segment manager index */
u32 segment_index; /**< segment index in segment manager */
struct _svm_fifo *next; /**< next in freelist/active chain */
struct _svm_fifo *prev; /**< prev in active chain */
CLIB_CACHE_LINE_ALIGN_MARK (consumer);
rb_tree_t ooo_deq_lookup; /**< rbtree for ooo deq chunk lookup */
svm_fifo_chunk_t *head_chunk; /**< tracks chunk where head lands */
svm_fifo_chunk_t *ooo_deq; /**< last chunk used for ooo dequeue */
u32 head; /**< fifo head position/byte */
volatile u32 want_deq_ntf; /**< producer wants nudge */
volatile u32 has_deq_ntf;
CLIB_CACHE_LINE_ALIGN_MARK (producer);
rb_tree_t ooo_enq_lookup; /**< rbtree for ooo enq chunk lookup */
u32 tail; /**< fifo tail position/byte */
u32 ooos_list_head; /**< Head of out-of-order linked-list */
svm_fifo_chunk_t *tail_chunk; /**< tracks chunk where tail lands */
svm_fifo_chunk_t *ooo_enq; /**< last chunk used for ooo enqueue */
ooo_segment_t *ooo_segments; /**< Pool of ooo segments */
u32 ooos_newest; /**< Last segment to have been updated */
volatile u8 n_subscribers; /**< Number of subscribers for io events */
u8 subscribers[SVM_FIFO_MAX_EVT_SUBSCRIBERS];
#if SVM_FIFO_TRACE
svm_fifo_trace_elem_t *trace;
#endif
} svm_fifo_t;
typedef struct fifo_segment_slice_
{
svm_fifo_t *fifos; /**< Linked list of active RX fifos */
svm_fifo_t *free_fifos; /**< Freelists by fifo size */
svm_fifo_chunk_t **free_chunks; /**< Freelists by chunk size */
uword n_fl_chunk_bytes; /**< Chunk bytes on freelist */
clib_spinlock_t chunk_lock;
} fifo_segment_slice_t;
struct fifo_segment_header_
{
fifo_segment_slice_t *slices; /** Fixed array of slices */
ssvm_shared_header_t *ssvm_sh; /**< Pointer to fs ssvm shared hdr */
uword n_free_bytes; /**< Segment free bytes */
u32 n_active_fifos; /**< Number of active fifos */
u32 n_reserved_bytes; /**< Bytes not to be allocated */
u32 max_log2_chunk_size; /**< Max log2(chunk size) for fs */
u8 flags; /**< Segment flags */
u8 n_slices; /**< Number of slices */
};
#endif /* SRC_SVM_FIFO_TYPES_H_ */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
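
The three CLIB_CACHE_LINE_ALIGN_MARK sections in svm_fifo_t are
deliberate: shared fields, consumer-written state (head, head_chunk,
ooo_deq) and producer-written state (tail, tail_chunk, ooo_enq) land
on separate cache lines so the two sides of the fifo do not
false-share. Per vppinfra, the marker expands to a zero-length aligned
array, roughly:

/* shape of the macro in vppinfra/cache.h (from memory; verify) */
#define CLIB_CACHE_LINE_ALIGN_MARK(mark) \
  u8 mark[0] __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)))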

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -3197,7 +3197,8 @@ vppcom_session_attr (uint32_t session_handle, uint32_t op,
/* VPP-TBD */
*(size_t *) buffer = (session->sndbuf_size ? session->sndbuf_size :
session->tx_fifo ? session->tx_fifo->nitems :
session->tx_fifo ?
svm_fifo_size (session->tx_fifo) :
vcm->cfg.tx_fifo_size);
*buflen = sizeof (u32);
@@ -3228,7 +3229,8 @@ vppcom_session_attr (uint32_t session_handle, uint32_t op,
/* VPP-TBD */
*(size_t *) buffer = (session->rcvbuf_size ? session->rcvbuf_size :
session->rx_fifo ? session->rx_fifo->nitems :
session->rx_fifo ?
svm_fifo_size (session->rx_fifo) :
vcm->cfg.rx_fifo_size);
*buflen = sizeof (u32);

@@ -180,7 +180,11 @@ ct_init_local_session (app_worker_t * client_wrk, app_worker_t * server_wrk,
props = application_segment_manager_properties (server);
round_rx_fifo_sz = 1 << max_log2 (props->rx_fifo_size);
round_tx_fifo_sz = 1 << max_log2 (props->tx_fifo_size);
seg_size = round_rx_fifo_sz + round_tx_fifo_sz + margin;
/* Increase size because of inefficient chunk allocations. Depending on
* how data is consumed, it may happen that more chunks than needed are
* allocated.
* TODO should remove once allocations are done more efficiently */
seg_size = 4 * (round_rx_fifo_sz + round_tx_fifo_sz + margin);
sm = app_worker_get_listen_segment_manager (server_wrk, ll);
seg_index = segment_manager_add_segment (sm, seg_size);

@@ -686,50 +686,6 @@ segment_manager_dealloc_fifos (svm_fifo_t * rx_fifo, svm_fifo_t * tx_fifo)
segment_manager_segment_reader_unlock (sm);
}
int
segment_manager_grow_fifo (segment_manager_t * sm, svm_fifo_t * f, u32 size)
{
fifo_segment_t *fs;
int rv;
fs = segment_manager_get_segment_w_lock (sm, f->segment_index);
rv = fifo_segment_grow_fifo (fs, f, size);
segment_manager_segment_reader_unlock (sm);
return rv;
}
int
segment_manager_collect_fifo_chunks (segment_manager_t * sm, svm_fifo_t * f)
{
fifo_segment_t *fs;
int rv;
fs = segment_manager_get_segment_w_lock (sm, f->segment_index);
rv = fifo_segment_collect_fifo_chunks (fs, f);
segment_manager_segment_reader_unlock (sm);
return rv;
}
int
segment_manager_shrink_fifo (segment_manager_t * sm, svm_fifo_t * f, u32 size,
u8 is_producer)
{
int rv;
rv = svm_fifo_reduce_size (f, size, is_producer);
/* Nothing to collect at this point */
if (!is_producer)
return rv;
if (f->flags & SVM_FIFO_F_COLLECT_CHUNKS)
segment_manager_collect_fifo_chunks (sm, f);
return rv;
}
u32
segment_manager_evt_q_expected_size (u32 q_len)
{

@@ -118,50 +118,6 @@ int segment_manager_try_alloc_fifos (fifo_segment_t * fs,
void segment_manager_dealloc_fifos (svm_fifo_t * rx_fifo,
svm_fifo_t * tx_fifo);
/**
* Grows fifo owned by segment manager
*
* @param sm segment manager that owns the fifo
* @param f fifo to be grown
* @param size amount of bytes to add to fifo
* @return 0 on success, negative number otherwise
*/
int segment_manager_grow_fifo (segment_manager_t * sm, svm_fifo_t * f,
u32 size);
/**
* Request to shrink fifo owned by segment manager
*
* If this is not called by the producer, no attempt is made to reduce the
* size until the producer tries to enqueue more data. To collect the chunks
* that are to be removed call @ref segment_manager_collect_fifo_chunks
*
* Size reduction does not affect fifo chunk boundaries. Therefore chunks are
* not split and the amount of bytes to be removed can be equal to or less
* than what was requested.
*
* @param sm segment manager that owns the fifo
* @param f fifo to be shrunk
* @param size amount of bytes to remove from fifo
* @param is_producer flag that indicates is caller is the producer for the
* fifo.
* @return actual number of bytes to be removed
*/
int segment_manager_shrink_fifo (segment_manager_t * sm, svm_fifo_t * f,
u32 size, u8 is_producer);
/**
* Collect fifo chunks that are no longer used
*
* This should not be called unless SVM_FIFO_F_COLLECT_CHUNKS is set for
* the fifo. The chunks are returned to the fifo segment freelist.
*
* @param sm segment manager that owns the fifo
* @param f fifo whose chunks are to be collected
* @return 0 on success, error otherwise
*/
int segment_manager_collect_fifo_chunks (segment_manager_t * sm,
svm_fifo_t * f);
u8 segment_manager_has_fifos (segment_manager_t * sm);
svm_msg_q_t *segment_manager_alloc_queue (fifo_segment_t * fs,

@@ -492,14 +492,14 @@ always_inline u32
transport_rx_fifo_size (transport_connection_t * tc)
{
session_t *s = session_get (tc->s_index, tc->thread_index);
return s->rx_fifo->nitems;
return svm_fifo_size (s->rx_fifo);
}
always_inline u32
transport_tx_fifo_size (transport_connection_t * tc)
{
session_t *s = session_get (tc->s_index, tc->thread_index);
return s->tx_fifo->nitems;
return svm_fifo_size (s->tx_fifo);
}
always_inline u8
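
The nitems field goes away with the fixed allocation, so callers
switch to svm_fifo_size. With the svm_fifo.h diff suppressed above,
this is presumably a trivial accessor over the new struct's size
field:

/* assumed shape, consistent with the new fifo_types.h */
static inline u32
svm_fifo_size (svm_fifo_t * f)
{
  return f->size;
}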

@@ -207,6 +207,7 @@ rb_tree_add_custom (rb_tree_t * rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
{
x = rb_node (rt, xi);
y = x;
ASSERT (z->key != x->key);
if (ltfn (z->key, x->key))
xi = x->left;
else
@@ -314,8 +315,8 @@ rb_tree_transplant (rb_tree_t * rt, rb_node_t * u, rb_node_t * v)
v->parent = u->parent;
}
void
rb_tree_del_node (rb_tree_t * rt, rb_node_t * z)
static void
rb_tree_del_node_internal (rb_tree_t * rt, rb_node_t * z)
{
rb_node_color_t y_original_color;
rb_node_t *x, *y, *yr, *yl, *xp, *w, *wl, *wr;
@@ -440,16 +441,20 @@ rb_tree_del_node (rb_tree_t * rt, rb_node_t * z)
x->color = RBTREE_BLACK;
}
void
rb_tree_del_node (rb_tree_t * rt, rb_node_t * z)
{
rb_tree_del_node_internal (rt, z);
pool_put (rt->nodes, z);
}
void
rb_tree_del (rb_tree_t * rt, u32 key)
{
rb_node_t *n;
n = rb_tree_search_subtree (rt, rb_node (rt, rt->root), key);
if (rb_node_index (rt, n) != RBTREE_TNIL_INDEX)
{
rb_tree_del_node (rt, n);
pool_put (rt->nodes, n);
}
rb_tree_del_node (rt, n);
}
void
@@ -458,10 +463,7 @@ rb_tree_del_custom (rb_tree_t * rt, u32 key, rb_tree_lt_fn ltfn)
rb_node_t *n;
n = rb_tree_search_subtree_custom (rt, rb_node (rt, rt->root), key, ltfn);
if (rb_node_index (rt, n) != RBTREE_TNIL_INDEX)
{
rb_tree_del_node (rt, n);
pool_put (rt->nodes, n);
}
rb_tree_del_node (rt, n);
}
u32
@@ -490,6 +492,14 @@ rb_tree_init (rb_tree_t * rt)
tnil->color = RBTREE_BLACK;
}
int
rb_tree_is_init (rb_tree_t * rt)
{
if (pool_elts (rt->nodes) == 0)
return 0;
return 1;
}
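
Two usable consequences of the rbtree changes: deletion now releases
the node's pool slot internally, and rb_tree_is_init lets users such
as the fifo's ooo lookups (see svm_fifo_init_ooo_lookup above)
initialize trees lazily. A small usage sketch:

#include <vppinfra/rbtree.h>

static void
rbtree_usage_sketch (rb_tree_t * rt)
{
  /* lazy init: a zeroed-out tree has no pool elements yet */
  if (!rb_tree_is_init (rt))
    rb_tree_init (rt);

  rb_tree_add (rt, 42);
  /* del unlinks and frees the node; callers no longer pool_put */
  rb_tree_del (rt, 42);
}
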
/*
* fd.io coding-style-patch-verification: ON
*

@@ -64,6 +64,7 @@ rb_node_t *rb_tree_search_subtree_custom (rb_tree_t * rt, rb_node_t * x,
u32 key, rb_tree_lt_fn ltfn);
rb_node_t *rb_tree_successor (rb_tree_t * rt, rb_node_t * x);
rb_node_t *rb_tree_predecessor (rb_tree_t * rt, rb_node_t * x);
int rb_tree_is_init (rb_tree_t * rt);
static inline rb_node_index_t
rb_node_index (rb_tree_t * rt, rb_node_t * n)