+
+ need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+ ret = rte_mempool_get_page_size(mp, &pg_sz);
+ if (ret < 0)
+ return ret;
+
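+	/* pg_sz == 0 means no page-boundary constraint applies, in which
+	 * case pg_shift stays 0 and chunks are not split on page boundaries.
+	 */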
+ if (pg_sz != 0)
+ pg_shift = rte_bsf32(pg_sz);
+
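+	/* Populate the pool in one or more memzone-backed chunks; each
+	 * iteration adds `ret` objects until all mp->size objects exist.
+	 */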
+ for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
+ size_t min_chunk_size;
+
+ mem_size = rte_mempool_ops_calc_mem_size(
+ mp, n, pg_shift, &min_chunk_size, &align);
+
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ ret = -ENAMETOOLONG;
+ goto fail;
+ }
+
+		/* if the whole chunk must be IOVA-contiguous (min_chunk_size
+		 * equals the full memory size), ask the memzone allocator for
+		 * IOVA-contiguous memory.
+		 */
+ if (min_chunk_size == (size_t)mem_size)
+ mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
+
+		/* Allocate a memzone, halving the requested size and retrying
+		 * on ENOMEM, but never dropping below the minimum chunk size.
+		 */
+ do {
+ mz = rte_memzone_reserve_aligned(mz_name,
+ RTE_MIN((size_t)mem_size, max_alloc_size),
+ mp->socket_id, mz_flags, align);
+
+ if (mz != NULL || rte_errno != ENOMEM)
+ break;
+
+ max_alloc_size = RTE_MIN(max_alloc_size,
+ (size_t)mem_size) / 2;
+ } while (mz == NULL && max_alloc_size >= min_chunk_size);
+
+ if (mz == NULL) {
+ ret = -rte_errno;
+ goto fail;
+ }
+
+ if (need_iova_contig_obj)
+ iova = mz->iova;
+ else
+ iova = RTE_BAD_IOVA;
+
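+		/* If the memzone is IOVA-contiguous or page boundaries do not
+		 * matter (pg_sz == 0), populate the whole chunk at once;
+		 * otherwise walk it page by page, registering each
+		 * IOVA-contiguous run separately.
+		 */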
+ if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
+ ret = rte_mempool_populate_iova(mp, mz->addr,
+ iova, mz->len,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ else
+ ret = rte_mempool_populate_virt(mp, mz->addr,
+ mz->len, pg_sz,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ if (ret == 0) /* should not happen */
+ ret = -ENOBUFS;
+ if (ret < 0) {
+ rte_memzone_free(mz);
+ goto fail;
+ }
+ }
+
+ rte_mempool_trace_populate_default(mp);
+ return mp->size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
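+
+/*
+ * A minimal usage sketch (illustrative only; error handling is elided
+ * and "ring_mp_mc" is just the common default ops name):
+ *
+ *	struct rte_mempool *mp;
+ *
+ *	mp = rte_mempool_create_empty("example", 1024, 64, 32, 0,
+ *		SOCKET_ID_ANY, 0);
+ *	rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+ *	if (rte_mempool_populate_default(mp) < 0)
+ *		rte_mempool_free(mp);
+ */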
+
+/* return the memory size required for mempool objects in anonymous memory */
+static ssize_t
+get_anon_size(const struct rte_mempool *mp)
+{
+ ssize_t size;
+ size_t pg_sz, pg_shift;
+ size_t min_chunk_size;
+ size_t align;
+
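+	/* anonymous mappings are backed by regular OS pages, so use the
+	 * system page size rather than a hugepage size
+	 */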
+ pg_sz = rte_mem_page_size();
+ pg_shift = rte_bsf32(pg_sz);
+ size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+ &min_chunk_size, &align);
+
+ return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
+static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ ssize_t size;
+
+	/*
+	 * Recalculate the size: memhdr->len holds the length of a single
+	 * contiguous chunk, which may be smaller if the anonymous mapping
+	 * was split into several contiguous chunks. The result must match
+	 * what was calculated at populate time.
+	 */
+ size = get_anon_size(memhdr->mp);
+ if (size < 0)
+ return;
+
+ rte_mem_unmap(opaque, size);
+}
+
+/* populate the mempool with an anonymous mapping; returns the number of
+ * objects added, or 0 with rte_errno set on error
+ */
+int
+rte_mempool_populate_anon(struct rte_mempool *mp)
+{
+ ssize_t size;
+ int ret;
+ char *addr;
+
+	/* fail if the mempool is already populated */
+ if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret < 0) {
+ rte_errno = -ret;
+ return 0;
+ }
+
+ size = get_anon_size(mp);
+ if (size < 0) {
+ rte_errno = -size;
+ return 0;
+ }
+
+	/* get a chunk of virtually contiguous memory */
+ addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
+ RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
+ if (addr == NULL)
+ return 0;
+	/* can't use MAP_LOCKED, it does not exist on BSD; lock explicitly */
+ if (rte_mem_lock(addr, size) < 0) {
+ rte_mem_unmap(addr, size);
+ return 0;
+ }
+
+ ret = rte_mempool_populate_virt(mp, addr, size, rte_mem_page_size(),
+ rte_mempool_memchunk_anon_free, addr);
+ if (ret == 0) /* should not happen */
+ ret = -ENOBUFS;
+ if (ret < 0) {
+ rte_errno = -ret;
+ goto fail;
+ }
+
+ rte_mempool_trace_populate_anon(mp);
+ return mp->populated_size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return 0;
+}
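+
+/*
+ * Note for callers (a short sketch, assuming rte_strerror() for
+ * reporting): unlike rte_mempool_populate_default(), this function
+ * returns 0 on failure, with the cause in rte_errno:
+ *
+ *	if (rte_mempool_populate_anon(mp) == 0)
+ *		printf("populate failed: %s\n", rte_strerror(rte_errno));
+ */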
+
+/* free a mempool */
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+ struct rte_mempool_list *mempool_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (mp == NULL)
+ return;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+ rte_mcfg_tailq_write_lock();
+	/* find the tailq entry for this mempool */
+ TAILQ_FOREACH(te, mempool_list, next) {
+ if (te->data == (void *)mp)
+ break;
+ }
+
+ if (te != NULL) {
+ TAILQ_REMOVE(mempool_list, te, next);
+ rte_free(te);
+ }
+ rte_mcfg_tailq_write_unlock();
+
+ rte_mempool_trace_free(mp);
+ rte_mempool_free_memchunks(mp);
+ rte_mempool_ops_free(mp);
+ rte_memzone_free(mp->mz);
+}
+
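+/*
+ * Initialize a cache front-end. The flush threshold is set above the
+ * nominal size (with the current CALC_CACHE_FLUSHTHRESH, roughly
+ * 1.5 * size) so that a full cache is not flushed back to the pool on
+ * every put.
+ */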
+static void
+mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
+{
+ cache->size = size;
+ cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
+ cache->len = 0;
+}
+
+/*
+ * Create and initialize a cache for objects that are retrieved from and
+ * returned to an underlying mempool. This structure is identical to the
+ * local_cache[lcore_id] pointed to by the mempool structure.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id)
+{
+ struct rte_mempool_cache *cache;
+
+ if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cache == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ mempool_cache_init(cache, size);
+
+ rte_mempool_trace_cache_create(size, socket_id, cache);
+ return cache;
+}
+
+/*
+ * Free a cache. It's the responsibility of the user to make sure that any
+ * remaining objects in the cache are flushed to the corresponding
+ * mempool.
+ */
+void
+rte_mempool_cache_free(struct rte_mempool_cache *cache)
+{
+ rte_mempool_trace_cache_free(cache);
+ rte_free(cache);
+}
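+
+/*
+ * A minimal usage sketch for a user-owned cache (illustrative only;
+ * "mp" is an existing mempool and error handling is elided):
+ *
+ *	struct rte_mempool_cache *cache;
+ *	void *obj;
+ *
+ *	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
+ *	if (rte_mempool_generic_get(mp, &obj, 1, cache) == 0)
+ *		rte_mempool_generic_put(mp, &obj, 1, cache);
+ *	rte_mempool_cache_flush(cache, mp);
+ *	rte_mempool_cache_free(cache);
+ */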
+
+/* create an empty mempool */
+struct rte_mempool *
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ int socket_id, unsigned flags)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_mempool_list *mempool_list;
+ struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te = NULL;
+ const struct rte_memzone *mz = NULL;
+ size_t mempool_size;
+ unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ struct rte_mempool_objsz objsz;
+ unsigned lcore_id;
+ int ret;
+
+	/* compile-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+
+ /* asked for zero items */
+ if (n == 0) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+	/* requested cache size is too big */
+ if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+ CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* "no cache align" imply "no spread" */
+ if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= MEMPOOL_F_NO_SPREAD;
+
+ /* calculate mempool object sizes. */
+ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ rte_mcfg_mempool_write_lock();
+
+ /*
+ * reserve a memory zone for this mempool: private data is
+ * cache-aligned
+ */
+ private_data_size = (private_data_size +
+ RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
+
+ /* try to allocate tailq entry */
+ te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
+ goto exit_unlock;
+ }
+
+ mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
+ mempool_size += private_data_size;
+ mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
+
+ ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ goto exit_unlock;
+ }
+
+ mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);