* @param is_mp
* Mono-producer (0) or multi-producers (1).
*/
-static inline void
+static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n, int is_mp)
{
* @param n
* The number of objects to add in the mempool from the obj_table.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
* @param n
* The number of objects to add in the mempool from obj_table.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
* @param obj
* A pointer to the object to be added.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_mp_put_bulk(mp, &obj, 1);
* @param obj
* A pointer to the object to be added.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_sp_put_bulk(mp, &obj, 1);
* @param obj
* A pointer to the object to be added.
*/
-static inline void
+static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_put_bulk(mp, &obj, 1);
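
A minimal usage sketch of the put wrappers above (not part of this patch; the
helper name and the assumption that the objects were taken from "mp" earlier
are illustrative only):

#include <rte_mempool.h>

/* Sketch: return a batch of objects to a mempool. Assumes "objs" holds
 * pointers previously obtained from "mp"; rte_mempool_put_bulk() picks the
 * single- or multi-producer path from the mempool flags. */
static void
example_release(struct rte_mempool *mp, void **objs, unsigned n)
{
	rte_mempool_put_bulk(mp, objs, n);
}
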
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
-static inline int
+static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
unsigned n, int is_mc)
{
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
int ret;
* - -ENOENT: Not enough entries in the mempool; no object is
* retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
int ret;
* - 0: Success; objects taken
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
int ret;
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_mc_get_bulk(mp, obj_p, 1);
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_sc_get_bulk(mp, obj_p, 1);
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int
+static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_get_bulk(mp, obj_p, 1);
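
A minimal round-trip sketch for the get/put wrappers (not part of this patch;
the error handling and the "use the object" step are assumptions made for
illustration):

#include <rte_mempool.h>

/* Sketch: take one object from "mp", use it, and return it to the pool. */
static int
example_round_trip(struct rte_mempool *mp)
{
	void *obj;
	int ret;

	ret = rte_mempool_get(mp, &obj);
	if (ret < 0)
		return ret;	/* -ENOENT: pool exhausted, nothing retrieved */

	/* ... use the object ... */

	rte_mempool_put(mp, obj);	/* give it back to the pool */
	return 0;
}
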
unsigned rte_mempool_count(const struct rte_mempool *mp);
/**
- * Return the number of free entries in the mempool.
+ * Return the number of free entries in the mempool ring, i.e. how many
+ * entries can be freed back to the mempool.
+ *
+ * NOTE: This corresponds to the number of elements *allocated* from the
+ * memory pool, not the number of elements in the pool itself. To count
+ * the number of elements currently available in the pool, use
+ * rte_mempool_count() instead.
*
* When cache is enabled, this function has to browse the length of
* all lcores, so it should not be used in a data path, but only for