Add RTE_ prefix to internal API defined in public header.
Use the RTE_ prefix instead of a double-underscore prefix, since identifiers beginning with a double underscore are reserved for the implementation.
Use uppercase names for macros where the lowercase rte_-prefixed name would conflict with an existing identifier.
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
mbuf->data_off = sizeof(octtx_pki_buflink_t);
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
if (nb_segs == 1)
mbuf->data_len = bytes_left;
else
mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
mbuf = mbuf->next;
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
mbuf->data_len = sg & 0xFFFF;
sg = sg >> 16;
uint64_t ol_flags = 0;
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
if (flag & NIX_RX_OFFLOAD_PTYPE_F)
mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
roc_prefetch_store_keep(mbuf3);
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
- __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
- __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
- __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
packets += NIX_DESCS_PER_LOOP;
}
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
} else {
sg->seg1_size = m->data_len;
*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << 55)))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
m = m_next;
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << (i + 55))))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
#endif
slist++;
i++;
/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << 55)))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << (i + 55))))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
slist++;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
sg.u = vgetq_lane_u64(cmd1[0], 0);
if (!(sg.u & (1ULL << 55)))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
return;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
sg.u = vgetq_lane_u64(cmd1, 0);
if (!(sg.u & (1ULL << 55)))
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1,
0);
rte_io_wmb();
#endif
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
vsetq_lane_u64(0x80000, xmask01, 0);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf0)->pool,
(void **)&mbuf0, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
vsetq_lane_u64(0x80000, xmask01, 1);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf1)->pool,
(void **)&mbuf1, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
vsetq_lane_u64(0x80000, xmask23, 0);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf2)->pool,
(void **)&mbuf2, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
vsetq_lane_u64(0x80000, xmask23, 1);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf3)->pool,
(void **)&mbuf3, 1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
/* Mark mempool object as "put" since
* it is freed by NIX
*/
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf0)->pool,
(void **)&mbuf0, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf1)->pool,
(void **)&mbuf1, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf2)->pool,
(void **)&mbuf2, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf3)->pool,
(void **)&mbuf3, 1, 0);
}
mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
mbuf = mbuf->next;
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
mbuf->data_len = sg & 0xFFFF;
sg = sg >> 16;
uint64_t ol_flags = 0;
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
if (flag & NIX_RX_OFFLOAD_PTYPE_F)
packet_type = nix_ptype_get(lookup_mem, w1);
roc_prefetch_store_keep(mbuf3);
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
- __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
- __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
- __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
/* Advance head pointer and packets */
head += NIX_DESCS_PER_LOOP;
}
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
}
}
/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << (i + 55))))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
slist++;
/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << 55)))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << (i + 55))))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
slist++;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
sg.u = vgetq_lane_u64(cmd1[0], 0);
if (!(sg.u & (1ULL << 55)))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) +
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
vsetq_lane_u64(0x80000, xmask01, 0);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf0)->pool,
(void **)&mbuf0, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
vsetq_lane_u64(0x80000, xmask01, 1);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf1)->pool,
(void **)&mbuf1, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
vsetq_lane_u64(0x80000, xmask23, 0);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf2)->pool,
(void **)&mbuf2, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
vsetq_lane_u64(0x80000, xmask23, 1);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf3)->pool,
(void **)&mbuf3, 1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
/* Mark mempool object as "put" since
* it is freed by NIX
*/
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf0)->pool,
(void **)&mbuf0, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf1)->pool,
(void **)&mbuf1, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf2)->pool,
(void **)&mbuf2, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf3)->pool,
(void **)&mbuf3, 1, 0);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* Mark mempool object as "put" since it is freed by PKO */
if (!(cmd_buf[0] & (1ULL << 58)))
- __mempool_check_cookies(m_tofree->pool, (void **)&m_tofree,
+ RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
1, 0);
/* Get the gaura Id */
gaura_id =
*/
if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
tx_pkt->next = NULL;
- __mempool_check_cookies(m_tofree->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
(void **)&m_tofree, 1, 0);
}
nb_desc++;
sd->nix_iova.addr = rte_mbuf_data_iova(m);
/* Mark mempool object as "put" since it is freed by NIX */
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
if (!ev->sched_type)
otx2_ssogws_head_wait(base + SSOW_LF_GWS_TAG);
otx2_prefetch_store_keep(mbuf3);
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
- __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
- __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
- __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
/* Advance head pointer and packets */
head += NIX_DESCS_PER_LOOP; head &= qmask;
mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
mbuf = mbuf->next;
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
mbuf->data_len = sg & 0xFFFF;
sg = sg >> 16;
uint64_t ol_flags = 0;
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
if (flag & NIX_RX_OFFLOAD_PTYPE_F)
mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask01, 0);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask01, 1);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask23, 0);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask23, 1);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
*/
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
RTE_SET_USED(mbuf);
}
}
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
}
}
/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << (i + 55))))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
rte_io_wmb();
#endif
slist++;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
- tlr = __mempool_get_trailer(obj);
+ tlr = rte_mempool_get_trailer(obj);
tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
}
rte_panic("MEMPOOL: object is owned by another "
"mempool\n");
- hdr = __mempool_get_header(obj);
+ hdr = rte_mempool_get_header(obj);
cookie = hdr->cookie;
if (free == 0) {
rte_panic("MEMPOOL: bad header cookie (audit)\n");
}
}
- tlr = __mempool_get_trailer(obj);
+ tlr = rte_mempool_get_trailer(obj);
cookie = tlr->cookie;
if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
RTE_LOG(CRIT, MEMPOOL,
mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
void *obj, __rte_unused unsigned idx)
{
- __mempool_check_cookies(mp, &obj, 1, 2);
+ RTE_MEMPOOL_CHECK_COOKIES(mp, &obj, 1, 2);
}
static void
* Number to add to the object-oriented statistics.
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do { \
+#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \
unsigned __lcore_id = rte_lcore_id(); \
if (__lcore_id < RTE_MAX_LCORE) { \
mp->stats[__lcore_id].name += n; \
} \
- } while(0)
+ } while (0)
#else
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif
/**
(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/* return the header of a mempool object (internal) */
-static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
+static inline struct rte_mempool_objhdr *
+rte_mempool_get_header(void *obj)
{
return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
sizeof(struct rte_mempool_objhdr));
*/
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
- struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
+ struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
return hdr->mp;
}
/* return the trailer of a mempool object (internal) */
-static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
+static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
struct rte_mempool *mp = rte_mempool_from_obj(obj);
return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
void * const *obj_table_const, unsigned n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __mempool_check_cookies(mp, obj_table_const, n, free) \
+#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
-#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
/**
void * const *first_obj_table_const, unsigned int n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
- free) \
+#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
+ free) \
rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
free)
#else
-#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
- free) \
+#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
+ free) \
do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
ops = rte_mempool_get_ops(mp->ops_index);
ret = ops->dequeue(mp, obj_table, n);
if (ret == 0) {
- __MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
+ RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
}
return ret;
}
{
struct rte_mempool_ops *ops;
- __MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
+ RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
ops = rte_mempool_get_ops(mp->ops_index);
return ops->enqueue(mp, obj_table, n);
* A pointer to a mempool cache structure. May be NULL if not needed.
*/
static __rte_always_inline void
-__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned int n, struct rte_mempool_cache *cache)
+rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
void **cache_objs;
/* increment stat now, adding in mempool always success */
- __MEMPOOL_STAT_ADD(mp, put_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, put_objs, n);
+ RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
/* No cache provided or if put would overflow mem allocated for cache */
if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
unsigned int n, struct rte_mempool_cache *cache)
{
rte_mempool_trace_generic_put(mp, obj_table, n, cache);
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_generic_put(mp, obj_table, n, cache);
+ RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
+ rte_mempool_do_generic_put(mp, obj_table, n, cache);
}
/**
* - <0: Error; code of ring dequeue function.
*/
static __rte_always_inline int
-__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
- unsigned int n, struct rte_mempool_cache *cache)
+rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
int ret;
uint32_t index, len;
cache->len -= n;
- __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
return 0;
ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
if (ret < 0) {
- __MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
} else {
- __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
}
return ret;
unsigned int n, struct rte_mempool_cache *cache)
{
int ret;
- ret = __mempool_generic_get(mp, obj_table, n, cache);
+ ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
if (ret == 0)
- __mempool_check_cookies(mp, obj_table, n, 1);
+ RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
rte_mempool_trace_generic_get(mp, obj_table, n, cache);
return ret;
}
ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
if (ret == 0) {
- __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, get_success_blks, n);
- __mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
- 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
+ RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
+ 1);
} else {
- __MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
- __MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
}
rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);