CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
# This software was jointly developed between OKTET Labs (under contract
# for Solarflare) and Solarflare Communications, Inc.
+allow_experimental_apis = true
+
sources = files('rte_mempool_bucket.c')
hdr->fill_cnt = 0;
hdr->lcore_id = LCORE_ID_ANY;
- rc = rte_mempool_op_populate_default(mp,
+ rc = rte_mempool_op_populate_helper(mp,
RTE_MIN(bd->obj_per_bucket,
max_objs - n_objs),
iter + bucket_header_sz,
*/
TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
- return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ return rte_mempool_op_populate_helper(mp, max_objs, vaddr, paddr, len,
obj_cb, obj_cb_arg);
}
/* Insert entry into the PA->VA Table */
dpaax_iova_table_update(paddr, vaddr, len);
- return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ return rte_mempool_op_populate_helper(mp, max_objs, vaddr, paddr, len,
obj_cb, obj_cb_arg);
}
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
EXPORT_MAP := rte_mempool_octeontx_version.map
LIBABIVER := 1
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Cavium, Inc
+allow_experimental_apis = true
+
sources = files('octeontx_fpavf.c',
'rte_mempool_octeontx.c'
)
* Simply need space for one more object to be able to
* fulfil alignment requirements.
*/
- mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+ mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num + 1,
pg_shift,
min_chunk_size, align);
if (mem_size >= 0) {
if (ret < 0)
return ret;
- return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,
obj_cb, obj_cb_arg);
}
endif
endif
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
EXPORT_MAP := rte_mempool_octeontx2_version.map
LIBABIVER := 1
# Copyright(C) 2019 Marvell International Ltd.
#
+allow_experimental_apis = true
+
sources = files('otx2_mempool_ops.c',
'otx2_mempool.c',
'otx2_mempool_irq.c',
* Simply need space for one more object to be able to
* fulfill alignment requirements.
*/
- return rte_mempool_op_calc_mem_size_default(mp, obj_num + 1, pg_shift,
+ return rte_mempool_op_calc_mem_size_helper(mp, obj_num + 1, pg_shift,
min_chunk_size, align);
}
if (npa_lf_aura_range_update_check(mp->pool_id) < 0)
return -EBUSY;
- return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,
obj_cb, obj_cb_arg);
}
size_t *min_chunk_size, size_t *align);
/**
- * Default way to calculate memory size required to store given number of
- * objects.
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to calculate memory size required to store given
+ * number of objects.
+ *
+ * This function is internal to mempool library and mempool drivers.
*
* If page boundaries may be ignored, it is just a product of total
* object size including header and trailer and number of objects.
*
* Minimum size of memory chunk is the total element size.
* Required memory chunk alignment is the cache line size.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] obj_num
+ * Number of objects to be added in mempool.
+ * @param[in] pg_shift
+ * LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size.
+ */
+__rte_experimental
+ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ * min_chunk_size, align).
*/
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/**
- * Default way to populate memory pool object using provided memory
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Helper to populate memory pool object using provided memory
* chunk: just slice objects one by one.
+ *
+ * This function is internal to mempool library and mempool drivers.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] max_objs
+ * Maximum number of objects to be added in mempool.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ * The IO address corresponding to vaddr, or RTE_BAD_IOVA.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added in mempool.
+ */
+__rte_experimental
+int rte_mempool_op_populate_helper(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Default way to populate memory pool object using provided memory chunk.
+ *
+ * Equivalent to rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
+ * len, obj_cb, obj_cb_arg).
*/
int rte_mempool_op_populate_default(struct rte_mempool *mp,
unsigned int max_objs,
#include <rte_mempool.h>
ssize_t
-rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
- uint32_t obj_num, uint32_t pg_shift,
- size_t *min_chunk_size, size_t *align)
+rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
{
size_t total_elt_sz;
size_t obj_per_page, pg_sz, objs_in_last_page;
return mem_size;
}
+/* Public default op, per the header doc equivalent to
+ * rte_mempool_op_calc_mem_size_helper(); kept as a thin wrapper so
+ * existing callers of the default op are unaffected by the rename.
+ */
+ssize_t
+rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+ min_chunk_size, align);
+}
+
int
-rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
- void *vaddr, rte_iova_t iova, size_t len,
- rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
size_t total_elt_sz;
size_t off;
return i;
}
+
+/* Public default op, per the header doc equivalent to
+ * rte_mempool_op_populate_helper(); kept as a thin wrapper so
+ * existing callers of the default op are unaffected by the rename.
+ */
+int
+rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg)
+{
+ return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
+ len, obj_cb, obj_cb_arg);
+}
# added in 19.11
rte_mempool_get_page_size;
+ rte_mempool_op_calc_mem_size_helper;
+ rte_mempool_op_populate_helper;
};