* @param n
* The number of objects to store back in the mempool, must be strictly
* positive.
- * @param is_mp
- * Mono-producer (0) or multi-producers (1).
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-producer (MEMPOOL_F_SP_PUT flag set) or multi-producers (flag not set).
*/
static inline void __attribute__((always_inline))
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+ unsigned n, int flags)
{
struct rte_mempool_cache *cache;
uint32_t index;
__MEMPOOL_STAT_ADD(mp, put, n);
/* cache is not enabled or single producer or non-EAL thread */
- if (unlikely(cache_size == 0 || is_mp == 0 ||
+ if (unlikely(cache_size == 0 || flags & MEMPOOL_F_SP_PUT ||
lcore_id >= RTE_MAX_LCORE))
goto ring_enqueue;
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the mempool from the obj_table.
- * @param is_mp
- * Mono-producer (0) or multi-producers (1).
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-producer (MEMPOOL_F_SP_PUT flag set) or multi-producers (flag not set).
*/
static inline void __attribute__((always_inline))
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+ unsigned n, int flags)
{
__mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_generic_put(mp, obj_table, n, is_mp);
+ __mempool_generic_put(mp, obj_table, n, flags);
}
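/*
 * Illustrative sketch, not part of the patch: calling the generic put with an
 * explicit flags argument. Passing 0 selects the multi-producer path; passing
 * MEMPOOL_F_SP_PUT selects the single-producer path. The helper name and the
 * assumption that obj_table already holds n objects are made up for the example.
 */
#include <rte_mempool.h>

static void
example_generic_put(struct rte_mempool *mp, void * const *tbl, unsigned n)
{
	/* multi-producer put: the MEMPOOL_F_SP_PUT bit is not set */
	rte_mempool_generic_put(mp, tbl, n, 0);
}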
/**
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- rte_mempool_generic_put(mp, obj_table, n, 1);
+ rte_mempool_generic_put(mp, obj_table, n, 0);
}
/**
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- rte_mempool_generic_put(mp, obj_table, n, 0);
+ rte_mempool_generic_put(mp, obj_table, n, MEMPOOL_F_SP_PUT);
}
/**
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- rte_mempool_generic_put(mp, obj_table, n,
- !(mp->flags & MEMPOOL_F_SP_PUT));
+ rte_mempool_generic_put(mp, obj_table, n, mp->flags);
}
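/*
 * Illustrative sketch, not part of the patch: because rte_mempool_put_bulk()
 * now forwards mp->flags unchanged, a pool created with MEMPOOL_F_SP_PUT takes
 * the single-producer branch automatically, with no negation or translation in
 * the caller. Pool name and sizes below are arbitrary example values.
 */
#include <rte_mempool.h>
#include <rte_lcore.h>

static struct rte_mempool *
example_create_sp_pool(void)
{
	/* 1023 elements of 2048 bytes, 32-object per-lcore cache */
	return rte_mempool_create("example_sp_pool", 1023, 2048, 32, 0,
				  NULL, NULL, NULL, NULL,
				  rte_socket_id(), MEMPOOL_F_SP_PUT);
}

static void
example_put_burst(struct rte_mempool *mp, void * const *tbl, unsigned n)
{
	/* takes the MEMPOOL_F_SP_PUT path because mp->flags is passed as-is */
	rte_mempool_put_bulk(mp, tbl, n);
}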
/**
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
- rte_mempool_generic_put(mp, &obj, 1, 1);
+ rte_mempool_generic_put(mp, &obj, 1, 0);
}
/**
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
- rte_mempool_generic_put(mp, &obj, 1, 0);
+ rte_mempool_generic_put(mp, &obj, 1, MEMPOOL_F_SP_PUT);
}
/**
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to get, must be strictly positive.
- * @param is_mc
- * Mono-consumer (0) or multi-consumers (1).
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-consumer (MEMPOOL_F_SC_GET flag set) or multi-consumers (flag not set).
* @return
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
static inline int __attribute__((always_inline))
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
- unsigned n, int is_mc)
+ unsigned n, int flags)
{
int ret;
struct rte_mempool_cache *cache;
uint32_t cache_size = mp->cache_size;
/* cache is not enabled, single consumer, request too big or non-EAL thread */
- if (unlikely(cache_size == 0 || is_mc == 0 ||
+ if (unlikely(cache_size == 0 || flags & MEMPOOL_F_SC_GET ||
n >= cache_size || lcore_id >= RTE_MAX_LCORE))
goto ring_dequeue;
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to get from mempool to obj_table.
- * @param is_mc
- * Mono-consumer (0) or multi-consumers (1).
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-consumer (MEMPOOL_F_SC_GET flag set) or multi-consumers (flag not set).
* @return
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
static inline int __attribute__((always_inline))
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
- int is_mc)
+ int flags)
{
int ret;
- ret = __mempool_generic_get(mp, obj_table, n, is_mc);
+ ret = __mempool_generic_get(mp, obj_table, n, flags);
if (ret == 0)
__mempool_check_cookies(mp, obj_table, n, 1);
return ret;
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_mempool_generic_get(mp, obj_table, n, 1);
+ return rte_mempool_generic_get(mp, obj_table, n, 0);
}
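/*
 * Illustrative sketch, not part of the patch: fetching a burst through the
 * generic get with an explicit flags argument and handling the -ENOENT return
 * documented above. The helper name and burst handling are assumptions made
 * for the example.
 */
#include <rte_mempool.h>

static int
example_generic_get(struct rte_mempool *mp, void **tbl, unsigned n)
{
	/* multi-consumer get: the MEMPOOL_F_SC_GET bit is not set */
	int ret = rte_mempool_generic_get(mp, tbl, n, 0);

	if (ret < 0)
		return ret;	/* -ENOENT: nothing was retrieved */

	/* ... use the n objects in tbl ... */

	rte_mempool_generic_put(mp, tbl, n, 0);	/* return them to the pool */
	return 0;
}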
/**
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_mempool_generic_get(mp, obj_table, n, 0);
+ return rte_mempool_generic_get(mp, obj_table, n, MEMPOOL_F_SC_GET);
}
/**
static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_mempool_generic_get(mp, obj_table, n,
- !(mp->flags & MEMPOOL_F_SC_GET));
+ return rte_mempool_generic_get(mp, obj_table, n, mp->flags);
}
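/*
 * Illustrative sketch, not part of the patch: with mp->flags forwarded
 * verbatim, the plain rte_mempool_get_bulk()/rte_mempool_put_bulk() calls
 * honour whatever MEMPOOL_F_SP_PUT/MEMPOOL_F_SC_GET bits were given at pool
 * creation time; the pool is assumed to be created elsewhere.
 */
#include <rte_mempool.h>

static int
example_default_burst(struct rte_mempool *mp, void **tbl, unsigned n)
{
	/* the SP/SC fast paths are chosen from mp->flags, not by the caller */
	if (rte_mempool_get_bulk(mp, tbl, n) < 0)
		return -1;	/* -ENOENT: not enough objects */

	/* ... use the objects ... */

	rte_mempool_put_bulk(mp, tbl, n);
	return 0;
}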
/**
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
- return rte_mempool_generic_get(mp, obj_p, 1, 1);
+ return rte_mempool_generic_get(mp, obj_p, 1, 0);
}
/**
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
- return rte_mempool_generic_get(mp, obj_p, 1, 0);
+ return rte_mempool_generic_get(mp, obj_p, 1, MEMPOOL_F_SC_GET);
}
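/*
 * Illustrative sketch, not part of the patch: a single-object round trip
 * through the explicit single-consumer/single-producer wrappers, which are now
 * thin forwards of MEMPOOL_F_SC_GET / MEMPOOL_F_SP_PUT to the generic calls.
 * The helper name is an assumption made for the example.
 */
#include <rte_mempool.h>

static int
example_single_object(struct rte_mempool *mp)
{
	void *obj;

	if (rte_mempool_sc_get(mp, &obj) < 0)
		return -1;	/* pool empty */

	/* ... use obj ... */

	rte_mempool_sp_put(mp, obj);
	return 0;
}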
/**