#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
- tlr = __mempool_get_trailer(obj);
+ tlr = rte_mempool_get_trailer(obj);
tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
}
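
For readers unfamiliar with the debug cookies touched above: when DPDK is built with RTE_LIBRTE_MEMPOOL_DEBUG, every object is bracketed by a header cookie and a trailer cookie, and the get/put paths verify both. A minimal sketch of how a corruption would be caught (the pool pointer and usage are illustrative):

    #include <rte_mempool.h>

    static int
    get_put_one(struct rte_mempool *mp)
    {
        void *obj;

        /* With RTE_LIBRTE_MEMPOOL_DEBUG compiled in, both calls below
         * verify the header and trailer cookies and panic on corruption. */
        if (rte_mempool_get(mp, &obj) < 0)
            return -1;
        /* Writing past the element size would clobber the trailer
         * cookie and be caught on put. */
        rte_mempool_put(mp, obj);
        return 0;
    }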
sz = (sz != NULL) ? sz : &lsz;
sz->header_size = sizeof(struct rte_mempool_objhdr);
- if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+ if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0)
sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
RTE_MEMPOOL_ALIGN);
sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
/* expand trailer to next cache line */
- if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+ if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
sz->total_size = sz->header_size + sz->elt_size +
sz->trailer_size;
 		sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
 				(sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
 				RTE_MEMPOOL_ALIGN_MASK);
 	}

 	/*
 	 * increase trailer to add padding between objects in order to
 	 * spread them across memory channels/ranks
 	 */
- if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+ if ((flags & RTE_MEMPOOL_F_NO_SPREAD) == 0) {
unsigned new_size;
 		new_size = arch_mem_object_align
 			(sz->header_size + sz->elt_size + sz->trailer_size);
 		sz->trailer_size = new_size - sz->header_size - sz->elt_size;
 	}
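
The spreading step above grows the trailer so that consecutive objects start on different memory channels/ranks. The resulting layout can be inspected with the public helper being patched here; a small sketch (the element size 2048 is arbitrary):

    #include <stdio.h>
    #include <rte_mempool.h>

    static void
    show_obj_size(void)
    {
        struct rte_mempool_objsz sz;
        uint32_t total;

        /* flags == 0: cache alignment and spreading both enabled. */
        total = rte_mempool_calc_obj_size(2048, 0, &sz);
        printf("hdr=%u elt=%u tlr=%u total=%u\n",
               sz.header_size, sz.elt_size, sz.trailer_size, total);
    }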
int ret;
/* create the internal ring if not already done */
- if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED) == 0) {
ret = rte_mempool_ops_alloc(mp);
if (ret != 0)
return ret;
- mp->flags |= MEMPOOL_F_POOL_CREATED;
+ mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
}
return 0;
}
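
The flag guard above is what makes ops selection lazy: the backing ring only comes into existence on the first populate call, so the ops can still be changed on an empty pool. A typical sequence relying on this (the pool name and sizes are illustrative):

    #include <rte_mempool.h>

    static struct rte_mempool *
    make_pool(void)
    {
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("example_pool", 4096, 2048,
                                      256, 0, SOCKET_ID_ANY, 0);
        if (mp == NULL)
            return NULL;
        /* Still allowed: RTE_MEMPOOL_F_POOL_CREATED is not set yet. */
        rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
        /* The first populate call triggers rte_mempool_ops_alloc(). */
        if (rte_mempool_populate_default(mp) < 0) {
            rte_mempool_free(mp);
            return NULL;
        }
        return mp;
    }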
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
- if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
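
The two branches compute how many bytes to skip at the start of the chunk: 8-byte alignment is enough when RTE_MEMPOOL_F_NO_CACHE_ALIGN packs the headers, a full RTE_MEMPOOL_ALIGN boundary is needed otherwise. The pointer arithmetic in isolation, as a sketch (head_padding is a hypothetical helper):

    #include <stddef.h>
    #include <rte_common.h>

    static size_t
    head_padding(char *vaddr, size_t align)
    {
        /* Round the pointer up to the next multiple of 'align'; e.g.
         * for vaddr ending in 0x...03 and align == 64 this skips 61
         * bytes before the first object. */
        return (size_t)(RTE_PTR_ALIGN_CEIL(vaddr, align) - vaddr);
    }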
/* At least some objects in the pool can now be used for IO. */
if (iova != RTE_BAD_IOVA)
- mp->flags &= ~MEMPOOL_F_NON_IO;
+ mp->flags &= ~RTE_MEMPOOL_F_NON_IO;
/* Report the mempool as ready only when fully populated. */
 	if (mp->populated_size >= mp->size)
 		mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_READY, mp);
size_t off, phys_len;
int ret, cnt = 0;
- if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+ if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG)
return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
len, free_cb, opaque);
if (ret < 0)
return -EINVAL;
alloc_in_ext_mem = (ret == 1);
- need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
if (!need_iova_contig_obj)
*pg_sz = 0;
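
In other words, a pool created with RTE_MEMPOOL_F_NO_IOVA_CONTIG reports a page size of 0, meaning chunks may be reserved without any page-boundary constraint. A sketch using the public accessor (show_pg_sz is a hypothetical helper; mp is any existing pool):

    #include <stdio.h>
    #include <rte_mempool.h>

    static void
    show_pg_sz(struct rte_mempool *mp)
    {
        size_t pg_sz;

        if (rte_mempool_get_page_size(mp, &pg_sz) == 0)
            /* 0 here means objects need not be IOVA-contiguous. */
            printf("page size constraint: %zu\n", pg_sz);
    }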
* reserve space in smaller chunks.
*/
- need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
ret = rte_mempool_get_page_size(mp, &pg_sz);
if (ret < 0)
return ret;
rte_free(cache);
}
-#define MEMPOOL_KNOWN_FLAGS (MEMPOOL_F_NO_SPREAD \
- | MEMPOOL_F_NO_CACHE_ALIGN \
- | MEMPOOL_F_SP_PUT \
- | MEMPOOL_F_SC_GET \
- | MEMPOOL_F_POOL_CREATED \
- | MEMPOOL_F_NO_IOVA_CONTIG \
- )
/* create an empty mempool */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
return NULL;
}
- /* enforce no unknown flag is passed by the application */
- if ((flags & ~MEMPOOL_KNOWN_FLAGS) != 0) {
+ /* enforce only user flags are passed by the application */
+ if ((flags & ~RTE_MEMPOOL_VALID_USER_FLAGS) != 0) {
rte_errno = EINVAL;
return NULL;
}
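
With the validation above, any bit outside RTE_MEMPOOL_VALID_USER_FLAGS now fails fast instead of being silently ignored. A sketch of what an application would observe (reject_unknown_flag and the sizes are illustrative; bit 31 is assumed to stay undefined):

    #include <rte_errno.h>
    #include <rte_mempool.h>

    static void
    reject_unknown_flag(void)
    {
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("bad_flags", 64, 128, 0, 0,
                                      SOCKET_ID_ANY, 1u << 31);
        if (mp == NULL && rte_errno == EINVAL) {
            /* unknown flag rejected as expected */
        }
    }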
* No objects in the pool can be used for IO until it's populated
* with at least some objects with valid IOVA.
*/
- flags |= MEMPOOL_F_NON_IO;
+ flags |= RTE_MEMPOOL_F_NON_IO;
/* "no cache align" imply "no spread" */
- if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
- flags |= MEMPOOL_F_NO_SPREAD;
+ if (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= RTE_MEMPOOL_F_NO_SPREAD;
/* calculate mempool object sizes. */
if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
goto exit_unlock;
}
- mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
+ mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
mempool_size += private_data_size;
mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
/* init the mempool structure */
mp = mz->addr;
- memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size));
+ memset(mp, 0, RTE_MEMPOOL_HEADER_SIZE(mp, cache_size));
ret = strlcpy(mp->name, name, sizeof(mp->name));
if (ret < 0 || ret >= (int)sizeof(mp->name)) {
rte_errno = ENAMETOOLONG;
* The local_cache points to just past the elt_pa[] array.
*/
mp->local_cache = (struct rte_mempool_cache *)
- RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));
+ RTE_PTR_ADD(mp, RTE_MEMPOOL_HEADER_SIZE(mp, 0));
/* Init all default caches. */
if (cache_size != 0) {
 	 * Since we have 4 combinations of the SP/SC/MP/MC flags, examine
 	 * them to set the correct index into the table of ops structs.
 	 */
*/
- if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+ if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET))
ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
- else if (flags & MEMPOOL_F_SP_PUT)
+ else if (flags & RTE_MEMPOOL_F_SP_PUT)
ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
- else if (flags & MEMPOOL_F_SC_GET)
+ else if (flags & RTE_MEMPOOL_F_SC_GET)
ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
else
ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
rte_panic("MEMPOOL: object is owned by another "
"mempool\n");
- hdr = __mempool_get_header(obj);
+ hdr = rte_mempool_get_header(obj);
cookie = hdr->cookie;
if (free == 0) {
rte_panic("MEMPOOL: bad header cookie (audit)\n");
}
}
- tlr = __mempool_get_trailer(obj);
+ tlr = rte_mempool_get_trailer(obj);
cookie = tlr->cookie;
if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
RTE_LOG(CRIT, MEMPOOL,
mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
void *obj, __rte_unused unsigned idx)
{
- __mempool_check_cookies(mp, &obj, 1, 2);
+ RTE_MEMPOOL_CHECK_COOKIES(mp, &obj, 1, 2);
}
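
mempool_obj_audit() is the per-object callback behind the public audit walk; the last argument, 2, means "just verify the cookie is valid", since a walked object may be either allocated or free. Applications can trigger the same walk directly; a sketch:

    #include <rte_mempool.h>

    static void
    audit_pool(struct rte_mempool *mp)
    {
        /* Walks all objects; the cookie checks only do real work when
         * RTE_LIBRTE_MEMPOOL_DEBUG is compiled in. */
        rte_mempool_audit(mp);
    }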
static void