+
+ return mp->size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
+/* return the memory size required for mempool objects in anonymous mem */
+static ssize_t
+get_anon_size(const struct rte_mempool *mp)
+{
+ ssize_t size;
+ size_t pg_sz, pg_shift;
+ size_t min_chunk_size;
+ size_t align;
+
+ pg_sz = getpagesize();
+ pg_shift = rte_bsf32(pg_sz);
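+ /* ask the mempool driver how much memory is needed to store all
+ * mp->size objects when chunks are limited to one page */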
+ size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+ &min_chunk_size, &align);
+
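+ /* a negative result is the -errno from the driver; callers treat
+ * it as an error */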
+ return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
+static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ ssize_t size;
+
+ /*
+ * Recompute the size: memhdr->len only holds the length of one
+ * contiguous chunk, which may be smaller than the whole mapping
+ * when the anonymous map was split into several chunks. The
+ * result must match the size computed at populate time.
+ */
+ size = get_anon_size(memhdr->mp);
+ if (size < 0)
+ return;
+
+ munmap(opaque, size);
+}
+
+/* populate the mempool with an anonymous mapping */
+int
+rte_mempool_populate_anon(struct rte_mempool *mp)
+{
+ ssize_t size;
+ int ret;
+ char *addr;
+
+ /* mempool is already populated, error */
+ if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret < 0) {
+ rte_errno = -ret;
+ return 0;
+ }
+
+ size = get_anon_size(mp);
+ if (size < 0) {
+ rte_errno = -size;
+ return 0;
+ }
+
+ /* get a chunk of virtually contiguous memory */
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ rte_errno = errno;
+ return 0;
+ }
+ /* can't use MAP_LOCKED, it does not exist on BSD */
+ if (mlock(addr, size) < 0) {
+ rte_errno = errno;
+ munmap(addr, size);
+ return 0;
+ }
+
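+ /* hand the area over to the mempool: rte_mempool_populate_virt()
+ * splits it into physically contiguous chunks and registers
+ * rte_mempool_memchunk_anon_free() to unmap the whole mapping once
+ * the chunks are released */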
+ ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+ rte_mempool_memchunk_anon_free, addr);
+ if (ret == 0) /* should not happen */
+ ret = -ENOBUFS;
+ if (ret < 0) {
+ rte_errno = -ret;
+ goto fail;
+ }
+
+ return mp->populated_size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return 0;
+}
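+
+/*
+ * Usage sketch (illustrative only, not part of this change): back an
+ * empty mempool with anonymous memory; "anon_mp", n and elt_size are
+ * placeholders.
+ *
+ *   mp = rte_mempool_create_empty("anon_mp", n, elt_size, 0, 0,
+ *       SOCKET_ID_ANY, 0);
+ *   if (mp == NULL)
+ *       rte_panic("cannot create mempool\n");
+ *   rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+ *   if (rte_mempool_populate_anon(mp) == 0)
+ *       rte_panic("populate: %s\n", rte_strerror(rte_errno));
+ */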
+
+/* free a mempool */
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+ struct rte_mempool_list *mempool_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (mp == NULL)
+ return;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+ rte_mcfg_tailq_write_lock();
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, mempool_list, next) {
+ if (te->data == (void *)mp)
+ break;
+ }
+
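+ /* te is NULL here if the mempool was never added to the tailq */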
+ if (te != NULL) {
+ TAILQ_REMOVE(mempool_list, te, next);
+ rte_free(te);
+ }
+ rte_mcfg_tailq_write_unlock();
+
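+ /* no list entry left: release the memory chunks (each invoking its
+ * free callback), the driver's private pool and the mempool memzone */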
+ rte_mempool_free_memchunks(mp);
+ rte_mempool_ops_free(mp);
+ rte_memzone_free(mp->mz);
+}
+
+static void
+mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
+{
+ cache->size = size;
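+ /* the flush threshold is kept above the nominal size so that a
+ * burst of frees can be absorbed by the cache before objects spill
+ * back to the backing pool */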
+ cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
+ cache->len = 0;
+}
+
+/*
+ * Create and initialize a cache for objects that are retrieved from and
+ * returned to an underlying mempool. This structure is identical to the
+ * local_cache[lcore_id] pointed to by the mempool structure.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id)
+{
+ struct rte_mempool_cache *cache;
+
+ if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cache == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ mempool_cache_init(cache, size);
+
+ return cache;