+
+/*
+ * Internal function to calculate the required memory chunk size; it is
+ * shared by the default implementation of the calc_mem_size callback and
+ * by the deprecated external function rte_mempool_xmem_size().
+ */
+size_t
+rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
+ uint32_t pg_shift)
+{
+ size_t obj_per_page, pg_num, pg_sz;
+
+ if (total_elt_sz == 0)
+ return 0;
+
+ if (pg_shift == 0)
+ return total_elt_sz * elt_num;
+
+ pg_sz = (size_t)1 << pg_shift;
+ obj_per_page = pg_sz / total_elt_sz;
+ if (obj_per_page == 0)
+ return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num;
+
+ pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
+ return pg_num << pg_shift;
+}
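+
+/*
+ * Example with illustrative numbers: with 2 MB pages (pg_shift = 21) and
+ * total_elt_sz = 2432 bytes, obj_per_page = 2097152 / 2432 = 862, so
+ * storing 10000 elements takes pg_num = ceil(10000 / 862) = 12 pages,
+ * i.e. 12 << 21 = 24 MB.
+ */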
+
+/*
+ * Calculate the maximum amount of memory required to store a given number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+ __rte_unused unsigned int flags)
+{
+ return rte_mempool_calc_mem_size_helper(elt_num, total_elt_sz,
+ pg_shift);
+}
+
+/*
+ * Calculate how much memory would actually be required, given the supplied
+ * table of pages, to store the required number of elements.
+ */
+ssize_t
+rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
+ uint32_t pg_shift, __rte_unused unsigned int flags)
+{
+ uint32_t elt_cnt = 0;
+ rte_iova_t start, end;
+ uint32_t iova_idx;
+ size_t pg_sz = (size_t)1 << pg_shift;
+
+ /* if iova is NULL, assume contiguous memory */
+ if (iova == NULL) {
+ start = 0;
+ end = pg_sz * pg_num;
+ iova_idx = pg_num;
+ } else {
+ start = iova[0];
+ end = iova[0] + pg_sz;
+ iova_idx = 1;
+ }
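+
+ /* scan the pages, packing objects into runs of IOVA-contiguous
+ * memory; an object is never allowed to straddle a discontiguity
+ */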
+ while (elt_cnt < elt_num) {
+
+ if (end - start >= total_elt_sz) {
+ /* enough contiguous memory, add an object */
+ start += total_elt_sz;
+ elt_cnt++;
+ } else if (iova_idx < pg_num) {
+ /* no room to store one obj, add a page */
+ if (end == iova[iova_idx]) {
+ end += pg_sz;
+ } else {
+ start = iova[iova_idx];
+ end = iova[iova_idx] + pg_sz;
+ }
+ iova_idx++;
+
+ } else {
+ /* no more pages: return how many elements fit, as a negative value */
+ return -(ssize_t)elt_cnt;
+ }
+ }
+
+ return (size_t)iova_idx << pg_shift;
+}
+
+/* free a memchunk allocated with rte_memzone_reserve() */
+static void
+rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ const struct rte_memzone *mz = opaque;
+ rte_memzone_free(mz);
+}
+
+/* Free the memory chunks used by a mempool. All objects must be in the pool */
+static void
+rte_mempool_free_memchunks(struct rte_mempool *mp)
+{
+ struct rte_mempool_memhdr *memhdr;
+ void *elt;
+
+ while (!STAILQ_EMPTY(&mp->elt_list)) {
+ rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
+ (void)elt;
+ STAILQ_REMOVE_HEAD(&mp->elt_list, next);
+ mp->populated_size--;
+ }
+
+ while (!STAILQ_EMPTY(&mp->mem_list)) {
+ memhdr = STAILQ_FIRST(&mp->mem_list);
+ STAILQ_REMOVE_HEAD(&mp->mem_list, next);
+ if (memhdr->free_cb != NULL)
+ memhdr->free_cb(memhdr, memhdr->opaque);
+ rte_free(memhdr);
+ mp->nb_mem_chunks--;
+ }
+}
+
+static int
+mempool_ops_alloc_once(struct rte_mempool *mp)
+{
+ int ret;
+
+ /* allocate the ops-specific pool data (e.g. the internal ring) if not already done */
+ if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ ret = rte_mempool_ops_alloc(mp);
+ if (ret != 0)
+ return ret;
+ mp->flags |= MEMPOOL_F_POOL_CREATED;
+ }
+ return 0;
+}
+
+/* Add objects to the pool, using a physically contiguous memory
+ * zone. Return the number of objects added, or a negative value
+ * on error.
+ */
+int
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ unsigned i = 0;
+ size_t off;
+ struct rte_mempool_memhdr *memhdr;
+ int ret;
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
+
+ /* mempool is already populated */
+ if (mp->populated_size >= mp->size)
+ return -ENOSPC;
+
+ memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
+ if (memhdr == NULL)
+ return -ENOMEM;
+
+ memhdr->mp = mp;
+ memhdr->addr = vaddr;
+ memhdr->iova = iova;
+ memhdr->len = len;
+ memhdr->free_cb = free_cb;
+ memhdr->opaque = opaque;
+
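+ /* compute the padding needed to align the start of the object area
+ * on a cache line, or on 8 bytes when cache alignment is disabled
+ * for this pool
+ */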
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
+ else
+ off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
+
+ if (off > len) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
+ (char *)vaddr + off,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
+ len - off, mempool_add_elem, NULL);
+
+ /* not enough room to store one object */
+ if (i == 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
+ return i;
+
+fail:
+ rte_free(memhdr);
+ return ret;
+}
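+
+/*
+ * Typical usage (sketch): a caller that has reserved a memzone can hand it
+ * over with
+ *   rte_mempool_populate_iova(mp, mz->addr, mz->iova, mz->len,
+ *       rte_mempool_memchunk_mz_free, (void *)(uintptr_t)mz);
+ * as rte_mempool_populate_default() below does.
+ */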
+
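+/* Wrapper for callers still using physical addresses: a phys_addr_t is
+ * passed through unchanged as an rte_iova_t.
+ */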
+int
+rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+ phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ return rte_mempool_populate_iova(mp, vaddr, paddr, len, free_cb, opaque);
+}
+
+/* Add objects to the pool, using a table of physical pages. Return the
+ * number of objects added, or a negative value on error.
+ */
+int
+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
+{
+ uint32_t i, n;
+ int ret, cnt = 0;
+ size_t pg_sz = (size_t)1 << pg_shift;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+ return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_IOVA,
+ pg_num * pg_sz, free_cb, opaque);
+
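+ /* walk the page table, merging runs of IOVA-contiguous pages and
+ * populating each run with a single call to
+ * rte_mempool_populate_iova()
+ */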
+ for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
+
+ /* populate with the largest group of contiguous pages */
+ for (n = 1; (i + n) < pg_num &&
+ iova[i + n - 1] + pg_sz == iova[i + n]; n++)
+ ;
+
+ ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
+ iova[i], n * pg_sz, free_cb, opaque);
+ if (ret < 0) {
+ rte_mempool_free_memchunks(mp);
+ return ret;
+ }
+ /* no need to call the free callback for next chunks */
+ free_cb = NULL;
+ cnt += ret;
+ }
+ return cnt;
+}
+
+int
+rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
+{
+ return rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ free_cb, opaque);
+}
+
+/* Populate the mempool with a virtual area. Return the number of
+ * objects added, or a negative value on error.
+ */
+int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ rte_iova_t iova;
+ size_t off, phys_len;
+ int ret, cnt = 0;
+
+ /* address and len must be page-aligned */
+ if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
+ return -EINVAL;
+ if (RTE_ALIGN_CEIL(len, pg_sz) != len)
+ return -EINVAL;
+
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+ return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
+ len, free_cb, opaque);
+
+ for (off = 0; off + pg_sz <= len &&
+ mp->populated_size < mp->size; off += phys_len) {
+
+ iova = rte_mem_virt2iova(addr + off);
+
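+ /* an unresolved IOVA is an error only when hugepages are in use;
+ * without hugepages (e.g. --no-huge) the IOVA may legitimately be
+ * unknown
+ */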
+ if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* populate with the largest group of contiguous pages */
+ for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
+ rte_iova_t iova_tmp;
+
+ iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
+
+ if (iova_tmp != iova + phys_len)
+ break;
+ }
+
+ ret = rte_mempool_populate_iova(mp, addr + off, iova,
+ phys_len, free_cb, opaque);
+ if (ret < 0)
+ goto fail;
+ /* no need to call the free callback for next chunks */
+ free_cb = NULL;
+ cnt += ret;
+ }
+
+ return cnt;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
+/* Default function to populate the mempool: allocate memory in memzones,
+ * and populate them. Return the number of objects added, or a negative
+ * value on error.
+ */
+int
+rte_mempool_populate_default(struct rte_mempool *mp)
+{
+ unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ ssize_t mem_size;
+ size_t align, pg_sz, pg_shift;
+ rte_iova_t iova;
+ unsigned mz_id, n;
+ int ret;
+ bool no_contig, try_contig, no_pageshift;
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG;
+
+ /*
+ * the following section calculates page shift and page size values.
+ *
+ * these values impact the result of calc_mem_size operation, which
+ * returns the amount of memory that should be allocated to store the
+ * desired number of objects. when the page shift is not zero, that size
+ * includes extra room for the padding between objects that keeps an
+ * object from crossing a page boundary. in other words, page size/shift
+ * are to be set to zero if mempool elements need not care about page
+ * boundaries.
+ * there are several considerations for page size and page shift here.
+ *
+ * if we don't need our mempools to have physically contiguous objects,
+ * then just set page shift and page size to 0, because the user has
+ * indicated that neither IOVA contiguity nor page boundaries matter.
+ *
+ * if we do need contiguous objects, there is also an option to reserve
+ * the entire mempool memory as one contiguous block of memory, in
+ * which case page shift and alignment don't matter either.
+ *
+ * if we require contiguous objects, but not necessarily the entire
+ * mempool reserved space to be contiguous, then there are two options.
+ *
+ * if our IO addresses are virtual rather than actual physical addresses
+ * (the IOVA as VA case), then no page shift is needed: our memory
+ * allocation will give us IO memory that is contiguous as far as the
+ * hardware is concerned, so we act as if we're getting contiguous memory.
+ *
+ * if our IO addresses are physical, the memory we get may come from
+ * pages of different sizes, and the amount of memory we need depends on
+ * which page size is actually used. Requesting a separate size for each
+ * and every possible page size is too much work, so instead we walk
+ * through the available page sizes, pick the smallest one and set the
+ * page shift to match it. We will waste some space this way, but it's
+ * much nicer than looping around trying to reserve memory for each and
+ * every page size.
+ *
+ * However, since size calculation will produce page-aligned sizes, it
+ * makes sense to first try and see if we can reserve the entire memzone
+ * in one contiguous chunk as well (otherwise we might end up wasting a
+ * 1G page on a 10MB memzone). If we fail to get enough contiguous
+ * memory, then we'll go and reserve space page-by-page.
+ */
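+ /*
+ * In code terms: no_pageshift selects the "page boundaries don't
+ * matter" case (pg_sz = 0), try_contig means we first try to reserve
+ * one IOVA-contiguous memzone, and the remaining case falls back to
+ * page-by-page population.
+ */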
+ no_pageshift = no_contig || rte_eal_iova_mode() == RTE_IOVA_VA;
+ try_contig = !no_contig && !no_pageshift && rte_eal_has_hugepages();
+
+ if (no_pageshift) {
+ pg_sz = 0;
+ pg_shift = 0;
+ } else if (try_contig) {
+ pg_sz = get_min_page_size();
+ pg_shift = rte_bsf32(pg_sz);
+ } else {
+ pg_sz = getpagesize();
+ pg_shift = rte_bsf32(pg_sz);
+ }
+
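+ /* reserve and populate one memzone per iteration until all mp->size
+ * objects are stored; 'n' is the number of objects still to be
+ * populated and 'ret' the number added by the previous iteration
+ */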
+ for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
+ size_t min_chunk_size;
+ unsigned int flags;
+
+ if (try_contig || no_pageshift)
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n,
+ 0, &min_chunk_size, &align);
+ else
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n,
+ pg_shift, &min_chunk_size, &align);
+
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ ret = -ENAMETOOLONG;
+ goto fail;
+ }
+
+ flags = mz_flags;
+
+ /* if we're trying to reserve contiguous memory, add appropriate
+ * memzone flag.
+ */
+ if (try_contig)
+ flags |= RTE_MEMZONE_IOVA_CONTIG;
+
+ mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+ mp->socket_id, flags, align);
+
+ /* if we were trying to allocate contiguous memory but failed, and
+ * the minimum required contiguous chunk fits within the minimum page
+ * size, recompute the memzone size using the page shift and try
+ * again.
+ */
+ if (mz == NULL && try_contig && min_chunk_size <= pg_sz) {
+ try_contig = false;
+ flags &= ~RTE_MEMZONE_IOVA_CONTIG;
+
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n,
+ pg_shift, &min_chunk_size, &align);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
+
+ mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+ mp->socket_id, flags, align);
+ }
+ /* don't try reserving with 0 size (i.e. the biggest available zone)
+ * if the required minimum chunk covers the whole mem_size, as is the
+ * case when we were asked to reserve IOVA-contiguous memory.
+ */
+ if (min_chunk_size < (size_t)mem_size && mz == NULL) {
+ /* not enough memory, retry with the biggest zone we
+ * have
+ */
+ mz = rte_memzone_reserve_aligned(mz_name, 0,
+ mp->socket_id, flags,
+ RTE_MAX(pg_sz, align));
+ }
+ if (mz == NULL) {
+ ret = -rte_errno;
+ goto fail;
+ }
+
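+ /* a zone obtained through the "biggest zone" fallback above may
+ * still be too small to hold a single chunk
+ */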
+ if (mz->len < min_chunk_size) {
+ rte_memzone_free(mz);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (no_contig)
+ iova = RTE_BAD_IOVA;
+ else
+ iova = mz->iova;
+
+ if (no_pageshift || try_contig)
+ ret = rte_mempool_populate_iova(mp, mz->addr,
+ iova, mz->len,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ else
+ ret = rte_mempool_populate_virt(mp, mz->addr,
+ RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ if (ret < 0) {
+ rte_memzone_free(mz);
+ goto fail;
+ }
+ }
+
+ return mp->size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
+/* return the memory size required for mempool objects in anonymous mem */
+static ssize_t
+get_anon_size(const struct rte_mempool *mp)
+{
+ ssize_t size;
+ size_t pg_sz, pg_shift;
+ size_t min_chunk_size;
+ size_t align;
+
+ pg_sz = getpagesize();
+ pg_shift = rte_bsf32(pg_sz);
+ size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+ &min_chunk_size, &align);
+
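+ /* min_chunk_size and align are computed by the callback but are not
+ * needed here: only the total size matters for the anonymous mapping
+ */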
+ return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
+static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ ssize_t size;