svm: immediate fifo growth even when wrapped

Type: fix

- When the fifo is wrapped, and if applicable, insert a new chunk after
  the tail-chunk and rebuild the rb_tree.
- Make sure that this new algorithm can be applied only when the fifo is
  used by a single thread (master-thread of the fifo).

Signed-off-by: Ryujiro Shibuya <ryujiro.shibuya@owmobility.com>
Change-Id: I3fc187bc496ea537ca24381e4abc08d2906c9e03
Committed by: Florin Coras
Parent: a9f1e7d4fd
Commit: 8e20fe7ab4
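
For context, a minimal usage sketch (not part of this commit) of the behavior described above: an application that uses the fifo from a single thread first declares ownership with the new svm_fifo_set_single_thread_owned () and can then expect svm_fifo_add_chunk () to grow the fifo right away, even while it is wrapped. The helper name grow_owned_fifo and the error handling around it are illustrative assumptions, not part of the patch.

#include <svm/svm_fifo.h>

/* Hypothetical helper: grow fifo 'f' by the caller-allocated chunk 'c',
 * assuming this thread is the fifo's only user. */
static int
grow_owned_fifo (svm_fifo_t * f, svm_fifo_chunk_t * c)
{
  /* Declare single-thread ownership; returns 1 if another thread already
   * owns the fifo, in which case immediate growth does not apply. */
  if (svm_fifo_set_single_thread_owned (f))
    return -1;

  /* With ownership declared, the patch lets svm_fifo_add_chunk link the
   * chunk in and update the size immediately, even when the fifo is
   * wrapped (subject to the conditions checked inside the function). */
  svm_fifo_add_chunk (f, c);
  return 0;
}
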
src/svm/svm_fifo.c

@@ -590,6 +590,9 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c)
       f->flags |= SVM_FIFO_F_MULTI_CHUNK;
     }
 
+  /* If fifo is not wrapped, update the size now */
+  if (!svm_fifo_is_wrapped (f))
+    {
   /* Initialize chunks and add to lookup rbtree */
   cur = c;
   if (f->new_chunks)
@@ -611,14 +614,92 @@ svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c)
       cur = cur->next;
     }
 
-  /* If fifo is not wrapped, update the size now */
-  if (!svm_fifo_is_wrapped (f))
-    {
       ASSERT (!f->new_chunks);
       svm_fifo_grow (f, c);
       return;
     }
 
+  /* Wrapped */
+  if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED)
+    {
+      ASSERT (f->master_thread_index == os_get_thread_index ());
+
+      if (!f->new_chunks && f->head_chunk != f->tail_chunk)
+        {
+          u32 head = 0, tail = 0;
+          f_load_head_tail_cons (f, &head, &tail);
+
+          svm_fifo_chunk_t *tmp = f->tail_chunk->next;
+
+          prev = f->tail_chunk;
+          u32 add_bytes = 0;
+          cur = prev->next;
+          while (cur != f->start_chunk)
+            {
+              /* remove any existing rb_tree entry */
+              rb_tree_del (&f->chunk_lookup, cur->start_byte);
+              cur = cur->next;
+            }
+
+          /* insert new chunk after the tail_chunk */
+          f->tail_chunk->next = c;
+          while (c)
+            {
+              add_bytes += c->length;
+              c->start_byte = prev->start_byte + prev->length;
+              rb_tree_add2 (&f->chunk_lookup, c->start_byte,
+                            pointer_to_uword (c));
+
+              prev = c;
+              c = c->next;
+            }
+          prev->next = tmp;
+
+          /* shift existing chunks along */
+          cur = tmp;
+          while (cur != f->start_chunk)
+            {
+              cur->start_byte = prev->start_byte + prev->length;
+              rb_tree_add2 (&f->chunk_lookup, cur->start_byte,
+                            pointer_to_uword (cur));
+              prev = cur;
+              cur = cur->next;
+            }
+
+          f->size += add_bytes;
+          f->nitems = f->size - 1;
+          f->new_chunks = 0;
+          head += add_bytes;
+
+          clib_atomic_store_rel_n (&f->head, head);
+          ASSERT (svm_fifo_is_sane (f));
+
+          return;
+        }
+    }
+
+  /* Wrapped, and optimization of single-thread-owned fifo cannot be applied */
+  /* Initialize chunks and add to lookup rbtree */
+  cur = c;
+  if (f->new_chunks)
+    {
+      prev = f->new_chunks;
+      while (prev->next)
+        prev = prev->next;
+      prev->next = c;
+    }
+  else
+    prev = f->end_chunk;
+
+  while (cur)
+    {
+      cur->start_byte = prev->start_byte + prev->length;
+      rb_tree_add2 (&f->chunk_lookup, cur->start_byte,
+                    pointer_to_uword (cur));
+      prev = cur;
+      cur = cur->next;
+    }
+
   /* Postpone size update */
   if (!f->new_chunks)
     {
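
To make the wrapped-growth path above easier to follow, here is a standalone toy sketch of the core list manipulation: splice the new chunk(s) in directly after the tail chunk, then renumber start_byte for every chunk between the old tail chunk and the start chunk. toy_chunk_t and insert_after_tail are made up for illustration and are not VPP types or functions.

#include <stdio.h>

typedef struct toy_chunk
{
  unsigned start_byte;          /* offset of the chunk within the fifo */
  unsigned length;              /* usable bytes in the chunk */
  struct toy_chunk *next;       /* circular list, last chunk points to start */
} toy_chunk_t;

/* Insert the NULL-terminated chain 'c' right after 'tail' and recompute
 * start_byte for 'c' and for every chunk that follows it, stopping once
 * the list wraps around to 'start'. Returns the number of bytes added. */
static unsigned
insert_after_tail (toy_chunk_t * start, toy_chunk_t * tail, toy_chunk_t * c)
{
  toy_chunk_t *after_tail = tail->next;
  toy_chunk_t *prev = tail, *cur;
  unsigned add_bytes = 0;

  /* splice the new chunk(s) in after the tail chunk */
  tail->next = c;
  for (cur = c; cur; cur = cur->next)
    {
      add_bytes += cur->length;
      cur->start_byte = prev->start_byte + prev->length;
      prev = cur;
    }
  prev->next = after_tail;

  /* shift the chunks that used to follow the tail chunk */
  for (cur = after_tail; cur != start; cur = cur->next)
    {
      cur->start_byte = prev->start_byte + prev->length;
      prev = cur;
    }
  return add_bytes;
}

int
main (void)
{
  /* three existing 16-byte chunks forming a circular list */
  toy_chunk_t c0 = { 0, 16, 0 }, c1 = { 16, 16, 0 }, c2 = { 32, 16, 0 };
  toy_chunk_t nc = { 0, 32, 0 };        /* new 32-byte chunk to add */

  c0.next = &c1;
  c1.next = &c2;
  c2.next = &c0;                /* wrap back to the start chunk */

  /* pretend the fifo is wrapped and c1 holds the tail */
  unsigned added = insert_after_tail (&c0, &c1, &nc);
  printf ("added %u bytes; c2 now starts at %u\n", added, c2.start_byte);
  return 0;
}

The actual patch additionally removes and re-adds the affected rb-tree lookup entries, updates f->size and f->nitems, and shifts the consumer head by the added byte count, which is presumably why this fast path is restricted to fifos owned by a single thread.
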
@@ -1152,6 +1233,25 @@ svm_fifo_is_sane (svm_fifo_t * f)
   return 1;
 }
 
+u8
+svm_fifo_set_single_thread_owned (svm_fifo_t * f)
+{
+  if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED)
+    {
+      if (f->master_thread_index == os_get_thread_index ())
+        {
+          /* just a duplicate call */
+          return 0;
+        }
+
+      /* already owned by another thread */
+      return 1;
+    }
+
+  f->flags |= SVM_FIFO_F_SINGLE_THREAD_OWNED;
+  return 0;
+}
+
 u8 *
 format_ooo_segment (u8 * s, va_list * args)
 {
src/svm/svm_fifo.h

@@ -70,6 +70,7 @@ typedef enum svm_fifo_flag_
   SVM_FIFO_F_SHRINK = 1 << 2,
   SVM_FIFO_F_COLLECT_CHUNKS = 1 << 3,
   SVM_FIFO_F_LL_TRACKED = 1 << 4,
+  SVM_FIFO_F_SINGLE_THREAD_OWNED = 1 << 5,
 } svm_fifo_flag_t;
 
 typedef struct _svm_fifo
@@ -477,6 +478,14 @@ ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f);
  * @return 1 if sane, 0 otherwise
  */
 u8 svm_fifo_is_sane (svm_fifo_t * f);
+/**
+ * Declare this fifo is used by only a single thread.
+ * In this special case, fifo-growth can be done in an efficient way without delay.
+ *
+ * @param f fifo
+ * @return 1 if the fifo is already owned by another thread, 0 otherwise
+ */
+u8 svm_fifo_set_single_thread_owned (svm_fifo_t * f);
 format_function_t format_svm_fifo;
 
 /**