Fix the mempool flags namespace by adding an RTE_ prefix to the names.
The old flags remain usable but will be deprecated in the future.
Flag MEMPOOL_F_NON_IO, added in this release, is simply renamed to have
the RTE_ prefix.
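
For illustration, a minimal usage sketch of the renamed API (the pool
name and sizes below are arbitrary and not part of this patch):

    #include <rte_mempool.h>

    /* New code should use the RTE_-prefixed flag names. */
    struct rte_mempool *mp = rte_mempool_create("example_pool",
            1024, 2048, 0, 0,
            NULL, NULL, NULL, NULL,
            SOCKET_ID_ANY,
            RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET);

    /* Existing code keeps compiling: the old names remain defined as
     * synonyms, e.g. MEMPOOL_F_SP_PUT is now an alias for
     * RTE_MEMPOOL_F_SP_PUT. */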
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
"\t -- Not used for IO (%c)\n",
ptr->name,
ptr->socket_id,
- (flags & MEMPOOL_F_NO_SPREAD) ? 'y' : 'n',
- (flags & MEMPOOL_F_NO_CACHE_ALIGN) ? 'y' : 'n',
- (flags & MEMPOOL_F_SP_PUT) ? 'y' : 'n',
- (flags & MEMPOOL_F_SC_GET) ? 'y' : 'n',
- (flags & MEMPOOL_F_POOL_CREATED) ? 'y' : 'n',
- (flags & MEMPOOL_F_NO_IOVA_CONTIG) ? 'y' : 'n',
- (flags & MEMPOOL_F_NON_IO) ? 'y' : 'n');
+ (flags & RTE_MEMPOOL_F_NO_SPREAD) ? 'y' : 'n',
+ (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) ? 'y' : 'n',
+ (flags & RTE_MEMPOOL_F_SP_PUT) ? 'y' : 'n',
+ (flags & RTE_MEMPOOL_F_SC_GET) ? 'y' : 'n',
+ (flags & RTE_MEMPOOL_F_POOL_CREATED) ? 'y' : 'n',
+ (flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) ? 'y' : 'n',
+ (flags & RTE_MEMPOOL_F_NON_IO) ? 'y' : 'n');
printf(" - Size %u Cache %u element %u\n"
" - header %u trailer %u\n"
" - private data size %u\n",
"noisy-lkup-num-reads-writes must be >= 0\n");
}
if (!strcmp(lgopts[opt_idx].name, "no-iova-contig"))
- mempool_flags = MEMPOOL_F_NO_IOVA_CONTIG;
+ mempool_flags = RTE_MEMPOOL_F_NO_IOVA_CONTIG;
if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
char *end = NULL;
rx_mode.offloads = rx_offloads;
tx_mode.offloads = tx_offloads;
- if (mempool_flags & MEMPOOL_F_NO_IOVA_CONTIG &&
+ if (mempool_flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG &&
mp_alloc_type != MP_ALLOC_ANON) {
TESTPMD_LOG(WARNING, "cannot use no-iova-contig without "
"mp-alloc=anon. mempool no-iova-contig is "
MEMPOOL_ELT_SIZE, 0, 0,
NULL, NULL,
NULL, NULL,
- SOCKET_ID_ANY, MEMPOOL_F_NO_IOVA_CONTIG << 1);
+ SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_IOVA_CONTIG << 1);
if (mp_cov != NULL) {
rte_mempool_free(mp_cov);
my_mp_init, NULL,
my_obj_init, NULL,
SOCKET_ID_ANY,
- MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
- MEMPOOL_F_SC_GET);
+ RTE_MEMPOOL_F_NO_CACHE_ALIGN | RTE_MEMPOOL_F_SP_PUT |
+ RTE_MEMPOOL_F_SC_GET);
if (mp_spsc == NULL)
RET_ERR();
}
mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
MEMPOOL_ELT_SIZE, 0, 0,
- SOCKET_ID_ANY, MEMPOOL_F_NO_IOVA_CONTIG);
+ SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_IOVA_CONTIG);
RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
rte_strerror(rte_errno));
rte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(), NULL);
ret = rte_mempool_populate_default(mp);
RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
rte_strerror(-ret));
- RTE_TEST_ASSERT(mp->flags & MEMPOOL_F_NON_IO,
+ RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
"NON_IO flag is not set when NO_IOVA_CONTIG is set");
ret = TEST_SUCCESS;
exit:
RTE_BAD_IOVA, block_size, NULL, NULL);
RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
rte_strerror(-ret));
- RTE_TEST_ASSERT(mp->flags & MEMPOOL_F_NON_IO,
+ RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
"NON_IO flag is not set when mempool is populated with only RTE_BAD_IOVA");
ret = rte_mempool_populate_iova(mp, virt, iova, block_size, NULL, NULL);
RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
rte_strerror(-ret));
- RTE_TEST_ASSERT(!(mp->flags & MEMPOOL_F_NON_IO),
+ RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
"NON_IO flag is not unset when mempool is populated with valid IOVA");
ret = rte_mempool_populate_iova(mp, RTE_PTR_ADD(virt, 2 * block_size),
RTE_BAD_IOVA, block_size, NULL, NULL);
RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
rte_strerror(-ret));
- RTE_TEST_ASSERT(!(mp->flags & MEMPOOL_F_NON_IO),
+ RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
"NON_IO flag is set even when some objects have valid IOVA");
ret = TEST_SUCCESS;
ret = rte_mempool_populate_default(mp);
RTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, "Failed to populate mempool: %s",
rte_strerror(-ret));
- RTE_TEST_ASSERT(!(mp->flags & MEMPOOL_F_NON_IO),
+ RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
"NON_IO flag is set by default");
ret = TEST_SUCCESS;
exit:
- ``mr_mempool_reg_en`` parameter [int]
A nonzero value enables implicit registration of DMA memory of all mempools
- except those having ``MEMPOOL_F_NON_IO``. This flag is set automatically
+ except those having ``RTE_MEMPOOL_F_NON_IO``. This flag is set automatically
for mempools populated with non-contiguous objects or those without IOVA.
The effect is that when a packet from a mempool is transmitted,
its memory is already registered for DMA in the PMD and no registration
removed. Its usages have been replaced by a new function
``rte_kvargs_get_with_value()``.
-* mempool: Added ``MEMPOOL_F_NON_IO`` flag to give a hint to DPDK components
+* mempool: Added ``RTE_MEMPOOL_F_NON_IO`` flag to give a hint to DPDK components
that objects from this pool will not be used for device IO (e.g. DMA).
+* mempool: The mempool flags ``MEMPOOL_F_*`` will be deprecated in the future.
+  The new flag names with the ``RTE_MEMPOOL_F_`` prefix should be used instead.
+
* net: Renamed ``s_addr`` and ``d_addr`` fields of ``rte_ether_hdr`` structure
to ``src_addr`` and ``dst_addr``, respectively.
mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
{
- if (mp->flags & MEMPOOL_F_NON_IO)
+ if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
{
- if (mp->flags & MEMPOOL_F_NON_IO)
+ if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
cache_sz /= rte_lcore_count();
/* Create chunk pool. */
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
- mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
plt_tim_dbg("Using single producer mode");
tim_ring->prod_type_sp = true;
}
}
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
- mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
timvf_log_info("Using single producer mode");
}
cache_sz /= rte_lcore_count();
/* Create chunk pool. */
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
- mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
otx2_tim_dbg("Using single producer mode");
tim_ring->prod_type_sp = true;
}
goto error;
rg_flags = RING_F_SC_DEQ;
- if (mp->flags & MEMPOOL_F_SP_PUT)
+ if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
rg_flags |= RING_F_SP_ENQ;
bd->adoption_buffer_rings[lcore_id] = rte_ring_create(rg_name,
rte_align32pow2(mp->size + 1), mp->socket_id, rg_flags);
goto no_mem_for_data;
}
bd->pool = mp;
- if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
bucket_header_size = sizeof(struct bucket_header);
else
bucket_header_size = RTE_CACHE_LINE_SIZE;
goto no_mem_for_stacks;
}
- if (mp->flags & MEMPOOL_F_SP_PUT)
+ if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
rg_flags |= RING_F_SP_ENQ;
- if (mp->flags & MEMPOOL_F_SC_GET)
+ if (mp->flags & RTE_MEMPOOL_F_SC_GET)
rg_flags |= RING_F_SC_DEQ;
rc = snprintf(rg_name, sizeof(rg_name),
RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
{
uint32_t rg_flags = 0;
- if (mp->flags & MEMPOOL_F_SP_PUT)
+ if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
rg_flags |= RING_F_SP_ENQ;
- if (mp->flags & MEMPOOL_F_SC_GET)
+ if (mp->flags & RTE_MEMPOOL_F_SC_GET)
rg_flags |= RING_F_SC_DEQ;
return ring_alloc(mp, rg_flags);
mr_ctrl, mp, addr);
/*
* Lookup can only fail on invalid input, e.g. "addr"
- * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
+ * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
*/
if (lkey != UINT32_MAX)
return lkey;
txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
0, 0, dev->node,
- MEMPOOL_F_NO_SPREAD);
+ RTE_MEMPOOL_F_NO_SPREAD);
txq->nb_sqb_bufs = nb_sqb_bufs;
txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
txq->nb_sqb_bufs_adj = nb_sqb_bufs -
goto fail;
}
- tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+ tmp = rte_mempool_calc_obj_size(blk_sz, RTE_MEMPOOL_F_NO_SPREAD, &sz);
if (dev->sqb_size != sz.elt_size) {
otx2_err("sqe pool block size is not expected %d != %d",
dev->sqb_size, tmp);
}
/* Mempool memory must be physically contiguous */
- if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
+ if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) {
PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
sz = (sz != NULL) ? sz : &lsz;
sz->header_size = sizeof(struct rte_mempool_objhdr);
- if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+ if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0)
sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
RTE_MEMPOOL_ALIGN);
sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
/* expand trailer to next cache line */
- if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+ if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
sz->total_size = sz->header_size + sz->elt_size +
sz->trailer_size;
sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
* increase trailer to add padding between objects in order to
* spread them across memory channels/ranks
*/
- if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+ if ((flags & RTE_MEMPOOL_F_NO_SPREAD) == 0) {
unsigned new_size;
new_size = arch_mem_object_align
(sz->header_size + sz->elt_size + sz->trailer_size);
int ret;
/* create the internal ring if not already done */
- if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED) == 0) {
ret = rte_mempool_ops_alloc(mp);
if (ret != 0)
return ret;
- mp->flags |= MEMPOOL_F_POOL_CREATED;
+ mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
}
return 0;
}
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
- if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
/* At least some objects in the pool can now be used for IO. */
if (iova != RTE_BAD_IOVA)
- mp->flags &= ~MEMPOOL_F_NON_IO;
+ mp->flags &= ~RTE_MEMPOOL_F_NON_IO;
/* Report the mempool as ready only when fully populated. */
if (mp->populated_size >= mp->size)
size_t off, phys_len;
int ret, cnt = 0;
- if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+ if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG)
return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
len, free_cb, opaque);
if (ret < 0)
return -EINVAL;
alloc_in_ext_mem = (ret == 1);
- need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
if (!need_iova_contig_obj)
*pg_sz = 0;
* reserve space in smaller chunks.
*/
- need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
ret = rte_mempool_get_page_size(mp, &pg_sz);
if (ret < 0)
return ret;
rte_free(cache);
}
-#define MEMPOOL_KNOWN_FLAGS (MEMPOOL_F_NO_SPREAD \
- | MEMPOOL_F_NO_CACHE_ALIGN \
- | MEMPOOL_F_SP_PUT \
- | MEMPOOL_F_SC_GET \
- | MEMPOOL_F_POOL_CREATED \
- | MEMPOOL_F_NO_IOVA_CONTIG \
+#define MEMPOOL_KNOWN_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
+ | RTE_MEMPOOL_F_NO_CACHE_ALIGN \
+ | RTE_MEMPOOL_F_SP_PUT \
+ | RTE_MEMPOOL_F_SC_GET \
+ | RTE_MEMPOOL_F_POOL_CREATED \
+ | RTE_MEMPOOL_F_NO_IOVA_CONTIG \
)
/* create an empty mempool */
struct rte_mempool *
* No objects in the pool can be used for IO until it's populated
* with at least some objects with valid IOVA.
*/
- flags |= MEMPOOL_F_NON_IO;
+ flags |= RTE_MEMPOOL_F_NON_IO;
/* "no cache align" imply "no spread" */
- if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
- flags |= MEMPOOL_F_NO_SPREAD;
+ if (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= RTE_MEMPOOL_F_NO_SPREAD;
/* calculate mempool object sizes. */
if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
* Since we have 4 combinations of the SP/SC/MP/MC examine the flags to
* set the correct index into the table of ops structs.
*/
- if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+ if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET))
ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
- else if (flags & MEMPOOL_F_SP_PUT)
+ else if (flags & RTE_MEMPOOL_F_SP_PUT)
ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
- else if (flags & MEMPOOL_F_SC_GET)
+ else if (flags & RTE_MEMPOOL_F_SC_GET)
ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
else
ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
} __rte_cache_aligned;
/** Spreading among memory channels not required. */
-#define MEMPOOL_F_NO_SPREAD 0x0001
+#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
/** Do not align objects on cache lines. */
-#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002
+#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
/** Default put is "single-producer". */
-#define MEMPOOL_F_SP_PUT 0x0004
+#define RTE_MEMPOOL_F_SP_PUT 0x0004
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
/** Default get is "single-consumer". */
-#define MEMPOOL_F_SC_GET 0x0008
+#define RTE_MEMPOOL_F_SC_GET 0x0008
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
/** Internal: pool is created. */
-#define MEMPOOL_F_POOL_CREATED 0x0010
+#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
/** Don't need IOVA contiguous objects. */
-#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020
+#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
/** Internal: no object from the pool can be used for device IO (DMA). */
-#define MEMPOOL_F_NON_IO 0x0040
+#define RTE_MEMPOOL_F_NON_IO 0x0040
/**
* @internal When debug is enabled, store some statistics.
* Calculate memory size required to store given number of objects.
*
* If mempool objects are not required to be IOVA-contiguous
- * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
* virtually contiguous chunk size. Otherwise, if mempool objects must
- * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear),
* min_chunk_size defines IOVA-contiguous chunk size.
*
* @param[in] mp
* constraint for the reserved zone.
* @param flags
* The *flags* argument is an OR of the following flags:
- * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
+ * - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
* between channels in RAM: the pool allocator will add padding
* between objects depending on the hardware configuration. See
* Memory alignment constraints for details. If this flag is set,
* the allocator will just align them to a cache line.
- * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ * - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
* cache-aligned. This flag removes this constraint, and no
* padding will be present between objects. This flag implies
- * MEMPOOL_F_NO_SPREAD.
- * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ * RTE_MEMPOOL_F_NO_SPREAD.
+ * - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
* when using rte_mempool_put() or rte_mempool_put_bulk() is
* "single-producer". Otherwise, it is "multi-producers".
- * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ * - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior
* when using rte_mempool_get() or rte_mempool_get_bulk() is
* "single-consumer". Otherwise, it is "multi-consumers".
- * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
+ * - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
* necessarily be contiguous in IO memory.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
* A pointer (virtual address) to the element of the pool.
* @return
* The IO address of the elt element.
- * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
+ * If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
* returned value is RTE_BAD_IOVA.
*/
static inline rte_iova_t
unsigned i;
/* too late, the mempool is already populated. */
- if (mp->flags & MEMPOOL_F_POOL_CREATED)
+ if (mp->flags & RTE_MEMPOOL_F_POOL_CREATED)
return -EEXIST;
for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
rte_errno = EINVAL;
return -1;
}
- if (mp->flags & MEMPOOL_F_SP_PUT || mp->flags & MEMPOOL_F_SC_GET) {
+ if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
+ mp->flags & RTE_MEMPOOL_F_SC_GET) {
PDUMP_LOG(ERR,
"mempool with SP or SC set not valid for pdump,"
"must have MP and MC set\n");
vq->iotlb_pool = rte_mempool_create(pool_name,
IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0,
0, 0, NULL, NULL, NULL, socket,
- MEMPOOL_F_NO_CACHE_ALIGN |
- MEMPOOL_F_SP_PUT);
+ RTE_MEMPOOL_F_NO_CACHE_ALIGN |
+ RTE_MEMPOOL_F_SP_PUT);
if (!vq->iotlb_pool) {
VHOST_LOG_CONFIG(ERR,
"Failed to create IOTLB cache pool (%s)\n",