The following changes are planned:
- removal of ``get_capabilities`` mempool ops and related flags.
- substitution of the ``register_memory_area`` op with a ``populate`` op.
- addition of a new op to allocate a contiguous block of objects
  if the underlying driver supports it (a possible signature is
  sketched after this list).
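
For illustration only, one possible shape of the contiguous-block op;
the callback name and signature below are assumptions, not a committed
API::

    /* Hypothetical driver callback: dequeue a physically contiguous
     * block of objects in one call.
     */
    typedef int (*rte_mempool_dequeue_contig_blocks_t)(
            struct rte_mempool *mp, void **first_obj_table,
            unsigned int n);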
Also, make sure to start the actual text at the margin.
=========================================================
+* mempool: capability flags and related functions have been removed.
+
+ Flags ``MEMPOOL_F_CAPA_PHYS_CONTIG`` and
+ ``MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS`` were used by the octeontx
+ mempool driver to customize the generic mempool library behaviour.
+ The new driver callbacks ``calc_mem_size`` and ``populate`` can now
+ be used to achieve the same customization without driver-specific
+ knowledge in the generic code.
+
* mbuf: The control mbuf API has been removed in v18.05. The impacted
functions and macros are:
to allow customization of the required memory size calculation.
A new callback ``populate`` has been added to ``rte_mempool_ops``
to allow customization of object population.
+ Callback ``get_capabilities`` has been removed from ``rte_mempool_ops``
+ since its functionality is covered by the ``calc_mem_size`` and
+ ``populate`` callbacks; their signatures are shown below.
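+
+ For reference, the new callback signatures, as implied by the driver
+ implementations in this patch, are::
+
+     typedef ssize_t (*rte_mempool_calc_mem_size_t)(
+             const struct rte_mempool *mp, uint32_t obj_num,
+             uint32_t pg_shift, size_t *min_chunk_size, size_t *align);
+
+     typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+             unsigned int max_objs, void *vaddr, rte_iova_t iova,
+             size_t len, rte_mempool_populate_obj_cb_t *obj_cb,
+             void *obj_cb_arg);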
* **Additional fields in rte_eth_dev_info.**
return octeontx_fpa_bufpool_free_count(pool);
}
-static int
-octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
{
- RTE_SET_USED(mp);
- *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
- MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
- return 0;
+ ssize_t mem_size;
+
+ /*
+ * Simply need space for one more object to be able to
+ * fulfil alignment requirements.
+ */
+ mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+ pg_shift,
+ min_chunk_size, align);
+ if (mem_size >= 0) {
+ /*
+ * Memory area which contains objects must be physically
+ * contiguous.
+ */
+ *min_chunk_size = mem_size;
+ }
+
+ return mem_size;
}
static int
return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
}
+static int
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ size_t total_elt_sz;
+ size_t off;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
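+	/*
+	 * This skips up to one full element when vaddr is already
+	 * aligned, which is why octeontx_fpavf_calc_mem_size() reserves
+	 * space for obj_num + 1 objects.
+	 */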
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
+}
+
static struct rte_mempool_ops octeontx_fpavf_ops = {
.name = "octeontx_fpavf",
.alloc = octeontx_fpavf_alloc,
.enqueue = octeontx_fpavf_enqueue,
.dequeue = octeontx_fpavf_dequeue,
.get_count = octeontx_fpavf_get_count,
- .get_capabilities = octeontx_fpavf_get_capabilities,
.register_memory_area = octeontx_fpavf_register_memory_area,
+ .calc_mem_size = octeontx_fpavf_calc_mem_size,
+ .populate = octeontx_fpavf_populate,
};
MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
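
For context, a minimal usage sketch (not part of this patch): an
application selects these ops by name before populating the pool, so the
populate step invokes the driver's calc_mem_size and populate callbacks
instead of querying capability flags. The pool name and sizes below are
illustrative assumptions.

	#include <rte_mempool.h>

	static struct rte_mempool *
	create_fpavf_pool(void)
	{
		struct rte_mempool *mp;

		/* Create an empty pool of 4096 objects of 2048 bytes. */
		mp = rte_mempool_create_empty("fpavf_pool", 4096, 2048,
					      0, 0, SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return NULL;

		/* Select the driver ops registered above, then populate. */
		if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf",
					       NULL) != 0 ||
		    rte_mempool_populate_default(mp) < 0) {
			rte_mempool_free(mp);
			return NULL;
		}

		return mp;
	}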
*/
size_t
rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
- unsigned int flags)
+ __rte_unused unsigned int flags)
{
size_t obj_per_page, pg_num, pg_sz;
- unsigned int mask;
-
- mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
- if ((flags & mask) == mask)
- /* alignment need one additional object */
- elt_num += 1;
if (total_elt_sz == 0)
return 0;
ssize_t
rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
- uint32_t pg_shift, unsigned int flags)
+ uint32_t pg_shift, __rte_unused unsigned int flags)
{
uint32_t elt_cnt = 0;
rte_iova_t start, end;
uint32_t iova_idx;
size_t pg_sz = (size_t)1 << pg_shift;
- unsigned int mask;
-
- mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
- if ((flags & mask) == mask)
- /* alignment need one additional object */
- elt_num += 1;
/* if iova is NULL, assume contiguous memory */
if (iova == NULL) {
rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
- unsigned total_elt_sz;
- unsigned int mp_capa_flags;
unsigned i = 0;
size_t off;
struct rte_mempool_memhdr *memhdr;
if (mp->populated_size >= mp->size)
return -ENOSPC;
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-
- /* Get mempool capabilities */
- mp_capa_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_capa_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
-
- /* update mempool capabilities */
- mp->flags |= mp_capa_flags;
-
memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
if (memhdr == NULL)
return -ENOMEM;
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
- if (mp_capa_flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
- /* align object start address to a multiple of total_elt_sz */
- off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
- else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
-/**
- * This capability flag is advertised by a mempool handler, if the whole
- * memory area containing the objects must be physically contiguous.
- * Note: This flag should not be passed by application.
- */
-#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
-/**
- * This capability flag is advertised by a mempool handler. Used for a case
- * where mempool driver wants object start address(vaddr) aligned to block
- * size(/ total element size).
- *
- * Note:
- * - This flag should not be passed by application.
- * Flag used for mempool driver only.
- * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with
- * MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS.
- */
-#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
/**
* @internal When debug is enabled, store some statistics.
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
-/**
- * Get the mempool capabilities.
- */
-typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
- unsigned int *flags);
-
/**
* Notify new memory area to mempool.
*/
* that pages are grouped in subsets of physically contiguous pages big
* enough to store at least one object.
*
- * If mempool driver requires object addresses to be block size aligned
- * (MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS), space for one extra element is
- * reserved to be able to meet the requirement.
- *
- * Minimum size of memory chunk is either all required space, if
- * capabilities say that whole memory area must be physically contiguous
- * (MEMPOOL_F_CAPA_PHYS_CONTIG), or a maximum of the page size and total
+ * Minimum size of memory chunk is the maximum of the page size and total
* element size.
*
* Required memory chunk alignment is the maximum of page size and cache
rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
- /**
- * Get the mempool capabilities
- */
- rte_mempool_get_capabilities_t get_capabilities;
/**
* Notify new memory area to mempool
*/
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
-/**
- * @internal wrapper for mempool_ops get_capabilities callback.
- *
- * @param mp [in]
- * Pointer to the memory pool.
- * @param flags [out]
- * Pointer to the mempool flags.
- * @return
- * - 0: Success; The mempool driver has advertised his pool capabilities in
- * flags param.
- * - -ENOTSUP - doesn't support get_capabilities ops (valid case).
- * - Otherwise, pool create fails.
- */
-int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags);
/**
* @internal wrapper for mempool_ops register_memory_area callback.
* API to notify the mempool handler when a new memory area is added to pool.
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
- ops->get_capabilities = h->get_capabilities;
ops->register_memory_area = h->register_memory_area;
ops->calc_mem_size = h->calc_mem_size;
ops->populate = h->populate;
return ops->get_count(mp);
}
-/* wrapper to get external mempool capabilities. */
-int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
-{
- struct rte_mempool_ops *ops;
-
- ops = rte_mempool_get_ops(mp->ops_index);
-
- RTE_FUNC_PTR_OR_ERR_RET(ops->get_capabilities, -ENOTSUP);
- return ops->get_capabilities(mp, flags);
-}
-
/* wrapper to notify new memory area to external mempool */
int
rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
uint32_t obj_num, uint32_t pg_shift,
size_t *min_chunk_size, size_t *align)
{
- unsigned int mp_flags;
- int ret;
size_t total_elt_sz;
size_t mem_size;
- /* Get mempool capabilities */
- mp_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
-
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
mem_size = rte_mempool_xmem_size(obj_num, total_elt_sz, pg_shift,
- mp->flags | mp_flags);
+ mp->flags);
- if (mp_flags & MEMPOOL_F_CAPA_PHYS_CONTIG)
- *min_chunk_size = mem_size;
- else
- *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+ *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
*align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift);
DPDK_17.11 {
global:
- rte_mempool_ops_get_capabilities;
rte_mempool_ops_register_memory_area;
rte_mempool_populate_iova;
rte_mempool_populate_iova_tab;