/* Dynamically linked applications built against ABI >= 21 resolve
 * rte_mempool_populate_iova to the _v21 implementation.
 */
BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v21, 21);
/* Statically linked applications always bind to the _v21 implementation. */
MAP_STATIC_SYMBOL(
	int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
				rte_iova_t iova, size_t len,
				rte_mempool_memchunk_free_cb_t *free_cb,
				void *opaque),
	rte_mempool_populate_iova_v21);
+
+__vsym int
+rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+__vsym int
+rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ int ret;
+
+ ret = rte_mempool_populate_iova_v21(mp, vaddr, iova, len, free_cb,
+ opaque);
+ if (ret == 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+VERSION_SYMBOL(rte_mempool_populate_iova, _v20, 20.0);
+
+static rte_iova_t
+get_iova(void *addr)
+{
+ struct rte_memseg *ms;
+
+ /* try registered memory first */
+ ms = rte_mem_virt2memseg(addr, NULL);
+ if (ms == NULL || ms->iova == RTE_BAD_IOVA)
+ /* fall back to actual physical address */
+ return rte_mem_virt2iova(addr);
+ return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
+}
+
/* Forward declaration for the versioned symbol defined below. */
__vsym int
rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/* Populate the mempool with a virtual area. Return the number of
 * objects added, or a negative value on error.
 *
 * The area is split into IOVA-contiguous chunks of up to pg_sz-aligned
 * pages, and each chunk is handed to rte_mempool_populate_iova_v21().
 * On any chunk failure, every memory chunk already added to the mempool
 * is freed before returning the error.
 */
__vsym int
rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque)
{
	rte_iova_t iova;
	size_t off, phys_len;
	int ret, cnt = 0;

	/* objects need not be IOVA-contiguous: register the whole area in
	 * one shot with RTE_BAD_IOVA, no page walking needed
	 */
	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
		return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
			len, free_cb, opaque);

	/* walk the area chunk by chunk; stop early once the mempool already
	 * holds all of its objects
	 */
	for (off = 0; off < len &&
		mp->populated_size < mp->size; off += phys_len) {

		iova = get_iova(addr + off);

		/* populate with the largest group of contiguous pages */
		for (phys_len = RTE_MIN(
			(size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
			(addr + off)),
			len - off);
			off + phys_len < len;
			phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
			rte_iova_t iova_tmp;

			iova_tmp = get_iova(addr + off + phys_len);

			/* stop extending the chunk at the first page whose
			 * IOVA is invalid or not contiguous with the chunk
			 */
			if (iova_tmp == RTE_BAD_IOVA ||
					iova_tmp != iova + phys_len)
				break;
		}

		/* call the v21 variant directly so a 0 return (no object
		 * populated in this chunk) is not remapped by the v20 shim
		 */
		ret = rte_mempool_populate_iova_v21(mp, addr + off, iova,
			phys_len, free_cb, opaque);
		if (ret == 0)
			continue;
		if (ret < 0)
			goto fail;
		/* no need to call the free callback for next chunks */
		free_cb = NULL;
		cnt += ret;
	}

	rte_mempool_trace_populate_virt(mp, addr, len, pg_sz, free_cb, opaque);
	return cnt;

 fail:
	/* roll back: release every chunk added to the mempool so far */
	rte_mempool_free_memchunks(mp);
	return ret;
}
/* Dynamically linked applications built against ABI >= 21 resolve
 * rte_mempool_populate_virt to the _v21 implementation.
 */
BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v21, 21);
/* Statically linked applications always bind to the _v21 implementation. */
MAP_STATIC_SYMBOL(
	int rte_mempool_populate_virt(struct rte_mempool *mp,
				char *addr, size_t len, size_t pg_sz,
				rte_mempool_memchunk_free_cb_t *free_cb,
				void *opaque),
	rte_mempool_populate_virt_v21);
+
+__vsym int
+rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+__vsym int
+rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ int ret;
+
+ ret = rte_mempool_populate_virt_v21(mp, addr, len, pg_sz,
+ free_cb, opaque);
+
+ if (ret == 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+VERSION_SYMBOL(rte_mempool_populate_virt, _v20, 20.0);
+
+/* Get the minimal page size used in a mempool before populating it. */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+ bool need_iova_contig_obj;
+ bool alloc_in_ext_mem;
+ int ret;
+
+ /* check if we can retrieve a valid socket ID */
+ ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+ if (ret < 0)
+ return -EINVAL;
+ alloc_in_ext_mem = (ret == 1);
+ need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+ if (!need_iova_contig_obj)
+ *pg_sz = 0;
+ else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+ *pg_sz = get_min_page_size(mp->socket_id);
+ else
+ *pg_sz = getpagesize();
+
+ rte_mempool_trace_get_page_size(mp, *pg_sz);
+ return 0;
+}
+
+/* Default function to populate the mempool: allocate memory in memzones,
+ * and populate them. Return the number of objects added, or a negative
+ * value on error.