+static int
+find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t *min = arg;
+
+ if (msl->page_sz < *min)
+ *min = msl->page_sz;
+
+ return 0;
+}
+
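+/* return the smallest page size backing registered memory, or fall back
+ * to the OS page size when no memseg list is populated
+ */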
+static size_t
+get_min_page_size(void)
+{
+ size_t min_pagesz = SIZE_MAX;
+
+ rte_memseg_list_walk(find_min_pagesz, &min_pagesz);
+
+ return min_pagesz == SIZE_MAX ? (size_t) getpagesize() : min_pagesz;
+}
+
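+/* attach an object to a mempool: fill the hidden header that precedes
+ * the object and link it into the pool's element list
+ */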
+static void
+mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
+ void *obj, rte_iova_t iova)
+{
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_objtlr *tlr __rte_unused;
+
+ /* set mempool pointer and IOVA in the header just before the object */
+ hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
+ hdr->mp = mp;
+ hdr->iova = iova;
+ STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
+ mp->populated_size++;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
+ tlr = __mempool_get_trailer(obj);
+ tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
+#endif
+}
+
+/* call obj_cb() for each mempool element */
+uint32_t
+rte_mempool_obj_iter(struct rte_mempool *mp,
+ rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct rte_mempool_objhdr *hdr;
+ void *obj;
+ unsigned n = 0;
+
+ STAILQ_FOREACH(hdr, &mp->elt_list, next) {
+ obj = (char *)hdr + sizeof(*hdr);
+ obj_cb(mp, obj_cb_arg, obj, n);
+ n++;
+ }
+
+ return n;
+}
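+/* Illustrative sketch (not part of this patch): a callback given to
+ * rte_mempool_obj_iter() sees every populated object in order, e.g. a
+ * hypothetical helper zeroing each element:
+ *
+ *	static void
+ *	zero_obj_cb(struct rte_mempool *mp, void *arg, void *obj, unsigned idx)
+ *	{
+ *		memset(obj, 0, mp->elt_size);
+ *	}
+ *
+ *	n = rte_mempool_obj_iter(mp, zero_obj_cb, NULL);
+ */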
+
+/* call mem_cb() for each mempool memory chunk */
+uint32_t
+rte_mempool_mem_iter(struct rte_mempool *mp,
+ rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
+{
+ struct rte_mempool_memhdr *hdr;
+ unsigned n = 0;
+
+ STAILQ_FOREACH(hdr, &mp->mem_list, next) {
+ mem_cb(mp, mem_cb_arg, hdr, n);
+ n++;
+ }
+
+ return n;
+}
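+/* Illustrative sketch (not part of this patch): summing the memory held
+ * by a pool with a hypothetical callback:
+ *
+ *	static void
+ *	sum_mem_cb(struct rte_mempool *mp, void *arg,
+ *		struct rte_mempool_memhdr *memhdr, unsigned idx)
+ *	{
+ *		*(size_t *)arg += memhdr->len;
+ *	}
+ */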
+
+/* get the header, trailer and total size of a mempool element. */
+uint32_t
+rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+ struct rte_mempool_objsz *sz)
+{
+ struct rte_mempool_objsz lsz;
+
+ sz = (sz != NULL) ? sz : &lsz;
+
+ sz->header_size = sizeof(struct rte_mempool_objhdr);
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+ sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
+ RTE_MEMPOOL_ALIGN);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sz->trailer_size = sizeof(struct rte_mempool_objtlr);
+#else
+ sz->trailer_size = 0;
+#endif
+
+ /* element size is at least 8-byte aligned */
+ sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
+
+ /* expand trailer to next cache line */
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+ sz->total_size = sz->header_size + sz->elt_size +
+ sz->trailer_size;
+ sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
+ (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
+ RTE_MEMPOOL_ALIGN_MASK);
+ }
+
+ /*
+ * increase trailer to add padding between objects in order to
+ * spread them across memory channels/ranks
+ */
+ if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+ unsigned new_size;
+ new_size = optimize_object_size(sz->header_size + sz->elt_size +
+ sz->trailer_size);
+ sz->trailer_size = new_size - sz->header_size - sz->elt_size;
+ }
+
+ /* this is the size of an object, including header and trailer */
+ sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
+
+ return sz->total_size;
+}
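+/* Worked example (illustrative, assuming a 64-byte cache line and a
+ * non-debug build): for elt_size = 40 and flags = 0, header_size is
+ * rounded up to 64, elt_size stays 40 (already 8-byte aligned) and
+ * trailer_size grows from 0 to 24, so total_size = 128, a multiple of
+ * the cache line; optimize_object_size() may then pad further to
+ * spread objects across memory channels/ranks.
+ */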
+
+/*
+ * Internal function to calculate the required memory chunk size, shared
+ * by the default implementation of the corresponding callback and by
+ * the deprecated rte_mempool_xmem_size().
+ */
+size_t
+rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
+ uint32_t pg_shift)
+{
+ size_t obj_per_page, pg_num, pg_sz;
+
+ if (total_elt_sz == 0)
+ return 0;
+
+ if (pg_shift == 0)
+ return total_elt_sz * elt_num;
+
+ pg_sz = (size_t)1 << pg_shift;
+ obj_per_page = pg_sz / total_elt_sz;
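+ /* object bigger than a page: each object spans whole pages */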
+ if (obj_per_page == 0)
+ return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num;
+
+ pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
+ return pg_num << pg_shift;
+}
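+/* Worked example (illustrative): with total_elt_sz = 1536, pg_shift = 12
+ * (4 KiB pages) and elt_num = 1000, two objects fit per page, so
+ * 500 pages, i.e. 500 << 12 = 2048000 bytes, are required.
+ */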
+
+/*
+ * Calculate the maximum amount of memory required to store the given
+ * number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+ __rte_unused unsigned int flags)
+{
+ return rte_mempool_calc_mem_size_helper(elt_num, total_elt_sz,
+ pg_shift);
+}
+
+/*
+ * Calculate how much memory would actually be required with the given
+ * memory footprint to store the required number of elements.
+ */
+ssize_t
+rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
+ uint32_t pg_shift, __rte_unused unsigned int flags)
+{
+ uint32_t elt_cnt = 0;
+ rte_iova_t start, end;
+ uint32_t iova_idx;
+ size_t pg_sz = (size_t)1 << pg_shift;
+
+ /* if iova is NULL, assume contiguous memory */
+ if (iova == NULL) {
+ start = 0;
+ end = pg_sz * pg_num;
+ iova_idx = pg_num;
+ } else {
+ start = iova[0];
+ end = iova[0] + pg_sz;
+ iova_idx = 1;
+ }
+ while (elt_cnt < elt_num) {
+
+ if (end - start >= total_elt_sz) {
+ /* enough contiguous memory, add an object */
+ start += total_elt_sz;
+ elt_cnt++;
+ } else if (iova_idx < pg_num) {
+ /* no room to store one obj, add a page */
+ if (end == iova[iova_idx]) {
+ end += pg_sz;
+ } else {
+ start = iova[iova_idx];
+ end = iova[iova_idx] + pg_sz;
+ }
+ iova_idx++;
+
+ } else {
+ /* no more pages, return how many elements fit */
+ return -(ssize_t)elt_cnt;
+ }
+ }
+
+ return (size_t)iova_idx << pg_shift;
+}
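+/* Worked example (illustrative): with two non-contiguous 4 KiB pages
+ * (pg_shift = 12) and total_elt_sz = 3072, one object fits per page:
+ * for elt_num = 2 the function returns 2 << 12 = 8192, while for
+ * elt_num = 3 it returns -2 since only two elements fit.
+ */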
+
+/* free a memchunk allocated with rte_memzone_reserve() */
+static void
+rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ const struct rte_memzone *mz = opaque;
+ rte_memzone_free(mz);
+}
+
+/* Free memory chunks used by a mempool. All objects must be in the pool. */
+static void
+rte_mempool_free_memchunks(struct rte_mempool *mp)
+{
+ struct rte_mempool_memhdr *memhdr;
+ void *elt;
+
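+ /* dequeue and unlink every object; the backing memory itself is
+ * released chunk by chunk below
+ */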
+ while (!STAILQ_EMPTY(&mp->elt_list)) {
+ rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
+ (void)elt;
+ STAILQ_REMOVE_HEAD(&mp->elt_list, next);
+ mp->populated_size--;
+ }
+
+ while (!STAILQ_EMPTY(&mp->mem_list)) {
+ memhdr = STAILQ_FIRST(&mp->mem_list);
+ STAILQ_REMOVE_HEAD(&mp->mem_list, next);
+ if (memhdr->free_cb != NULL)
+ memhdr->free_cb(memhdr, memhdr->opaque);
+ rte_free(memhdr);
+ mp->nb_mem_chunks--;
+ }
+}
+
+static int
+mempool_ops_alloc_once(struct rte_mempool *mp)
+{
+ int ret;
+
+ /* create the pool's internal structure (a ring by default) once */
+ if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ ret = rte_mempool_ops_alloc(mp);
+ if (ret != 0)
+ return ret;
+ mp->flags |= MEMPOOL_F_POOL_CREATED;
+ }
+ return 0;
+}
+
+/* Add objects to the pool, using a physically contiguous memory
+ * zone. Return the number of objects added, or a negative value
+ * on error.
+ */
+int
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ unsigned i = 0;
+ size_t off;
+ struct rte_mempool_memhdr *memhdr;
+ int ret;
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
+
+ /* mempool is already populated */
+ if (mp->populated_size >= mp->size)
+ return -ENOSPC;
+
+ memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
+ if (memhdr == NULL)
+ return -ENOMEM;
+
+ memhdr->mp = mp;
+ memhdr->addr = vaddr;
+ memhdr->iova = iova;
+ memhdr->len = len;
+ memhdr->free_cb = free_cb;
+ memhdr->opaque = opaque;
+
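+ /* align the first object on 8 bytes, or on a cache line by default */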
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
+ else
+ off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
+
+ if (off > len) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
+ (char *)vaddr + off,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
+ len - off, mempool_add_elem, NULL);
+
+ /* not enough room to store one object */
+ if (i == 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
+ return i;
+
+fail:
+ rte_free(memhdr);
+ return ret;
+}
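+/* Illustrative sketch (not part of this patch): populating a pool from
+ * a memzone, with rte_mempool_memchunk_mz_free() as the free callback
+ * so the zone is released together with the pool:
+ *
+ *	mz = rte_memzone_reserve(name, size, socket_id, 0);
+ *	if (mz != NULL)
+ *		ret = rte_mempool_populate_iova(mp, mz->addr, mz->iova,
+ *			mz->len, rte_mempool_memchunk_mz_free,
+ *			(void *)(uintptr_t)mz);
+ */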
+
+int
+rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+ phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ return rte_mempool_populate_iova(mp, vaddr, paddr, len, free_cb, opaque);
+}
+
+/* Add objects to the pool, using a table of physical pages. Return the
+ * number of objects added, or a negative value on error.
+ */
+int
+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
+{
+ uint32_t i, n;
+ int ret, cnt = 0;
+ size_t pg_sz = (size_t)1 << pg_shift;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+ return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_IOVA,
+ pg_num * pg_sz, free_cb, opaque);
+
+ for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
+
+ /* populate with the largest group of contiguous pages */
+ for (n = 1; (i + n) < pg_num &&
+ iova[i + n - 1] + pg_sz == iova[i + n]; n++)
+ ;
+
+ ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
+ iova[i], n * pg_sz, free_cb, opaque);
+ if (ret < 0) {
+ rte_mempool_free_memchunks(mp);
+ return ret;
+ }
+ /* no need to call the free callback for next chunks */
+ free_cb = NULL;
+ cnt += ret;
+ }
+ return cnt;
+}
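+/* Illustrative note: for a page table {P, P + pg_sz, Q} with Q not
+ * adjacent to P + pg_sz, the loop above populates two chunks: one of
+ * two contiguous pages starting at P, then one page at Q.
+ */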
+
+int
+rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
+{
+ return rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ free_cb, opaque);
+}
+
+/* Populate the mempool with a virtual area. Return the number of
+ * objects added, or a negative value on error.
+ */
+int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)