vppinfra: fixed pool from heap

The immediate benefit is the ability to use hugepage-backed memory.

Type: improvement
Change-Id: Ibcae961aa09ea92d3e931a40bedbc6346a4b2039
Signed-off-by: Damjan Marion <damarion@cisco.com>
Author: Damjan Marion
Date:   2022-03-18 12:28:35 +01:00
Parent: 762cfd408b
Commit: 86bbdf926c
2 changed files with 18 additions and 89 deletions
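
To illustrate the hugepage point (an editor's sketch, not part of the commit): once the main clib heap is created on hugepages, a fixed pool carved from it is hugepage backed with no pool-specific code. clib_mem_init_with_page_size, CLIB_MEM_PAGE_SZ_2M and pool_init_fixed are existing vppinfra entry points; the element type and sizes are invented for the example, and in a full VPP binary the equivalent heap setup would normally come from startup configuration (memory { main-heap-page-size 2m }) rather than a direct call.

    #include <vppinfra/mem.h>
    #include <vppinfra/pool.h>

    typedef struct
    {
      u64 a, b;
    } elt_t;

    int
    main (int argc, char *argv[])
    {
      elt_t *pool = 0;

      /* Back the main heap with 2M hugepages; allocations below,
         including the fixed pool, then live on hugepages. */
      clib_mem_init_with_page_size (1ULL << 30, CLIB_MEM_PAGE_SZ_2M);

      /* Element vector, free-index vector and free bitmap all come
         from the heap now; no private mmap segment per pool. */
      pool_init_fixed (pool, 1 << 20);
      return 0;
    }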

src/vppinfra/pool.c

@@ -40,92 +40,36 @@
 __clib_export void
 _pool_init_fixed (void **pool_ptr, u32 elt_size, u32 max_elts)
 {
-  u8 *mmap_base;
-  u64 vector_size;
-  u64 free_index_size;
-  u64 total_size;
-  u64 page_size;
-  pool_header_t *fh;
-  vec_header_t *vh;
+  uword *b;
+  pool_header_t *ph;
   u8 *v;
-  u32 *fi;
   u32 i;
-  u32 set_bits;

   ASSERT (elt_size);
   ASSERT (max_elts);

-  vector_size =
-    vec_header_bytes (pool_aligned_header_bytes) + (u64) elt_size * max_elts;
-  free_index_size = vec_header_bytes (0) + sizeof (u32) * max_elts;
+  v = vec_resize_allocate_memory (0, max_elts, elt_size * max_elts,
+                                  sizeof (pool_header_t),
+                                  CLIB_CACHE_LINE_BYTES, VEC_NUMA_UNSPECIFIED);

-  /* Round up to a cache line boundary */
-  vector_size = (vector_size + CLIB_CACHE_LINE_BYTES - 1)
-    & ~(CLIB_CACHE_LINE_BYTES - 1);
-  free_index_size = (free_index_size + CLIB_CACHE_LINE_BYTES - 1)
-    & ~(CLIB_CACHE_LINE_BYTES - 1);
-  total_size = vector_size + free_index_size;

-  /* Round up to an even number of pages */
-  page_size = clib_mem_get_page_size ();
-  total_size = (total_size + page_size - 1) & ~(page_size - 1);

-  /* mmap demand zero memory */
-  mmap_base = mmap (0, total_size, PROT_READ | PROT_WRITE,
-                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mmap_base == MAP_FAILED)
-    {
-      clib_unix_warning ("mmap");
-      *pool_ptr = 0;
-    }

-  /* First comes the pool header */
-  fh = (pool_header_t *) mmap_base;
-  /* Find the user vector pointer */
-  v = (u8 *) (mmap_base + vec_header_bytes (pool_aligned_header_bytes));
-  /* Finally, the vector header */
-  vh = _vec_find (v);

-  fh->free_bitmap = 0; /* No free elts (yet) */
-  fh->max_elts = max_elts;
-  fh->mmap_base = mmap_base;
-  fh->mmap_size = total_size;
-  vh->len = max_elts;
+  ph = pool_header (v);
+  ph->max_elts = max_elts;

-  /* Build the free-index vector */
-  vh = (vec_header_t *) ((u8 *) fh + vector_size);
-  vh->len = max_elts;
-  fi = (u32 *) (vh + 1);
-  fh->free_indices = fi;
+  vec_validate_aligned (ph->free_indices, max_elts - 1, CLIB_CACHE_LINE_BYTES);
+  for (i = 0; i < max_elts; i++)
+    ph->free_indices[i] = (max_elts - 1) - i;

-  /* Set the entire free bitmap */
-  clib_bitmap_alloc (fh->free_bitmap, max_elts);
-  clib_memset (fh->free_bitmap, 0xff,
-               vec_len (fh->free_bitmap) * sizeof (uword));
+  clib_bitmap_alloc (ph->free_bitmap, max_elts);

-  /* Clear any extraneous set bits */
-  set_bits = vec_len (fh->free_bitmap) * BITS (uword);
+  for (b = ph->free_bitmap, i = max_elts; i >= uword_bits;
+       i -= uword_bits, b++)
+    b[0] = ~0ULL;

-  for (i = max_elts; i < set_bits; i++)
-    fh->free_bitmap = clib_bitmap_set (fh->free_bitmap, i, 0);

-  /* Create the initial free vector */
-  for (i = 0; i < max_elts; i++)
-    fi[i] = (max_elts - 1) - i;
+  if (i)
+    b[0] = pow2_mask (i);

   *pool_ptr = v;
 }

 /*
  * fd.io coding-style-patch-verification: ON
  *
  * Local Variables:
  * eval: (c-set-style "gnu")
  * End:
  */
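
The new bitmap initialization replaces the old memset-plus-clear-extraneous-bits dance with a direct fill: every whole uword gets ~0ULL, and the final partial word gets pow2_mask (i), i.e. only the low i bits set. A standalone sketch of the same technique in plain C (bitmap_fill is a made-up name; vppinfra's pow2_mask (i) evaluates to ((uword) 1 << i) - 1):

    #include <stdint.h>

    /* Set the low n bits of bitmap b, one 64-bit word at a time. */
    static void
    bitmap_fill (uint64_t *b, uint32_t n)
    {
      uint32_t i;

      for (i = n; i >= 64; i -= 64, b++)
        b[0] = ~0ULL;                     /* whole words: all bits set */
      if (i)
        b[0] = ((uint64_t) 1 << i) - 1;   /* tail word: low i bits set */
    }

    int
    main (void)
    {
      uint64_t bm[2] = { 0, 0 };
      bitmap_fill (bm, 70); /* bm[0] == ~0ULL, bm[1] == 0x3f */
      return bm[1] == 0x3f ? 0 : 1;
    }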

src/vppinfra/pool.h

@@ -61,10 +61,6 @@ typedef struct
   /** Maximum size of the pool, in elements */
   u32 max_elts;

-  /** mmap segment info: base + length */
-  u8 *mmap_base;
-  u64 mmap_size;
 } pool_header_t;

 /** Align pool header so that pointers are naturally aligned. */
@@ -430,19 +426,8 @@ _pool_free (void *v)
     return v;

   clib_bitmap_free (p->free_bitmap);

-  if (p->max_elts)
-    {
-      int rv;
-      rv = munmap (p->mmap_base, p->mmap_size);
-      if (rv)
-        clib_unix_warning ("munmap");
-    }
-  else
-    {
-      vec_free (p->free_indices);
-      vec_free_h (v, pool_aligned_header_bytes);
-    }
+  vec_free (p->free_indices);
+  vec_free_h (v, pool_aligned_header_bytes);

   return 0;
 }
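
With the mmap segment gone, _pool_free has a single exit path and a fixed pool behaves like any other pool from allocation to teardown. A hedged usage sketch (entry_t and the counts are invented; pool_init_fixed, pool_get, pool_put and pool_free are the standard vppinfra pool macros):

    #include <vppinfra/pool.h>

    typedef struct
    {
      u32 key;
      u32 value;
    } entry_t;

    void
    example (void)
    {
      entry_t *entries = 0;
      entry_t *e;

      pool_init_fixed (entries, 128); /* fixed pool, now heap allocated */
      pool_get (entries, e);          /* descending free-index fill hands
                                         out index 0 first */
      e->key = 1;
      e->value = 2;
      pool_put (entries, e);
      pool_free (entries);            /* single code path: vec_free,
                                         no munmap */
    }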