+static void
+mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
+{
+ struct rte_mempool **mpp;
+
+ obj = (char *)obj + mp->header_size;
+
+ /* set mempool ptr in header */
+ mpp = __mempool_from_obj(obj);
+ *mpp = mp;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ __mempool_write_header_cookie(obj, 1);
+ __mempool_write_trailer_cookie(obj);
+#endif
+ /* call the initializer */
+ if (obj_init)
+ obj_init(mp, obj_init_arg, obj, obj_idx);
+
+ /* enqueue in ring */
+ rte_ring_sp_enqueue(mp->ring, obj);
+}
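+
+/*
+ * Layout of each element handled by mempool_add_elem() above
+ * (illustrative summary of the code, debug-only fields in brackets):
+ *
+ *   | header: pool ptr [+ 64-bit cookie] | object | trailer |
+ *   |<----------- header_size ---------->|
+ *
+ * The obj pointer passed in addresses the start of the header; the
+ * function skips past it, stamps the pool pointer (and debug cookies),
+ * runs the user constructor, then enqueues the object on the ring.
+ */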
+
+uint32_t
+rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg)
+{
+ uint32_t i, j, k;
+ uint32_t pgn;
+ uintptr_t end, start, va;
+ uintptr_t pg_sz;
+
+ pg_sz = (uintptr_t)1 << pg_shift;
+ va = (uintptr_t)vaddr;
+
+ i = 0;
+ j = 0;
+
+ while (i != elt_num && j != pg_num) {
+
+ start = RTE_ALIGN_CEIL(va, align);
+ end = start + elt_sz;
+
+ pgn = (end >> pg_shift) - (start >> pg_shift);
+ pgn += j;
+
+ /*
+ * pgn is now the index of the last page the element would
+ * occupy: is there enough space left for it?
+ */
+ if (pgn >= pg_num)
+ break;
+
+ for (k = j;
+ k != pgn &&
+ paddr[k] + pg_sz == paddr[k + 1];
+ k++)
+ ;
+
+ /*
+ * If pages j..pgn are physically contiguous, use them to hold
+ * the next element; otherwise skip to the next page and leave
+ * the remainder of the current one unused.
+ */
+ if (k == pgn) {
+ if (obj_iter != NULL)
+ obj_iter(obj_iter_arg, (void *)start,
+ (void *)end, i);
+ va = end;
+ j = pgn;
+ i++;
+ } else {
+ va = RTE_ALIGN_CEIL((va + 1), pg_sz);
+ j++;
+ }
+ }
+
+ return (i);
+}
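+
+/*
+ * Usage sketch (illustrative): with a NULL obj_iter the function is a
+ * pure dry run that reports how many elements of elt_sz bytes fit in
+ * the area, given its physical page layout:
+ *
+ *   uint32_t n = rte_mempool_obj_iter(va, UINT32_MAX, elt_sz, 8,
+ *           paddr, pg_num, pg_shift, NULL, NULL);
+ *
+ * No memory is touched; elements that would straddle physically
+ * non-contiguous pages are skipped, exactly as during population.
+ */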
+
+/*
+ * Populate the mempool: create each object that fits in the reserved
+ * memory and enqueue it on the pool's ring.
+ */
+
+struct mempool_populate_arg {
+ struct rte_mempool *mp;
+ rte_mempool_obj_ctor_t *obj_init;
+ void *obj_init_arg;
+};
+
+static void
+mempool_obj_populate(void *arg, void *start, void *end, uint32_t idx)
+{
+ struct mempool_populate_arg *pa = arg;
+
+ mempool_add_elem(pa->mp, start, idx, pa->obj_init, pa->obj_init_arg);
+ pa->mp->elt_va_end = (uintptr_t)end;
+}
+
+static void
+mempool_populate(struct rte_mempool *mp, size_t num, size_t align,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
+{
+ uint32_t elt_sz;
+ struct mempool_populate_arg arg;
+
+ elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
+ arg.mp = mp;
+ arg.obj_init = obj_init;
+ arg.obj_init_arg = obj_init_arg;
+
+ mp->size = rte_mempool_obj_iter((void *)mp->elt_va_start,
+ num, elt_sz, align,
+ mp->elt_pa, mp->pg_num, mp->pg_shift,
+ mempool_obj_populate, &arg);
+}
+
+uint32_t
+rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+ struct rte_mempool_objsz *sz)
+{
+ struct rte_mempool_objsz lsz;
+
+ sz = (sz != NULL) ? sz : &lsz;
+
+ /*
+ * The header contains at least a pointer back to the pool and,
+ * optionally, a 64-bit cookie.
+ */
+ sz->header_size = 0;
+ sz->header_size += sizeof(struct rte_mempool *); /* ptr to pool */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sz->header_size += sizeof(uint64_t); /* cookie */
+#endif
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+ sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
+ CACHE_LINE_SIZE);
+
+ /* trailer contains the cookie in debug mode */
+ sz->trailer_size = 0;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sz->trailer_size += sizeof(uint64_t); /* cookie */
+#endif
+ /* the element size is at least 8-byte aligned */
+ sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
+
+ /* expand trailer to next cache line */
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+ sz->total_size = sz->header_size + sz->elt_size +
+ sz->trailer_size;
+ sz->trailer_size += ((CACHE_LINE_SIZE -
+ (sz->total_size & CACHE_LINE_MASK)) &
+ CACHE_LINE_MASK);
+ }
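+
+ /*
+ * Worked example, assuming a 64-bit non-debug build with 64-byte
+ * cache lines: for elt_size == 100, header_size is 8 rounded up
+ * to 64 and elt_size is rounded up to 104, so header + elt = 168;
+ * the trailer grows by (64 - 168 % 64) == 24 so that each object
+ * ends on a cache line, giving a 192-byte object (before the
+ * channel/rank padding applied below).
+ */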
+
+ /*
+ * increase trailer to add padding between objects in order to
+ * spread them across memory channels/ranks
+ */
+ if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+ unsigned new_size;
+ new_size = optimize_object_size(sz->header_size + sz->elt_size +
+ sz->trailer_size);
+ sz->trailer_size = new_size - sz->header_size - sz->elt_size;
+ }
+
+ if (!rte_eal_has_hugepages()) {
+ /*
+ * compute trailer size so that pool elements fit exactly in
+ * a standard page
+ */
+ int page_size = getpagesize();
+ int new_size = page_size - sz->header_size - sz->elt_size;
+ if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
+ printf("When hugepages are disabled, pool objects "
+ "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
+ sz->header_size, sz->elt_size, sz->trailer_size,
+ page_size);
+ return 0;
+ }
+ sz->trailer_size = new_size;
+ }
+
+ /* this is the size of an object, including header and trailer */
+ sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
+
+ return (sz->total_size);
+}
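+
+/*
+ * Usage sketch (illustrative): compute the full per-object footprint
+ * of a 64-byte element with default flags:
+ *
+ *   struct rte_mempool_objsz sz;
+ *   uint32_t total = rte_mempool_calc_obj_size(64, 0, &sz);
+ *
+ * On return, total == sz.header_size + sz.elt_size + sz.trailer_size;
+ * a NULL objsz pointer may be passed when only the total is needed.
+ */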
+
+
+/*
+ * Calculate the maximum amount of memory required to store the given
+ * number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
+{
+ size_t n, pg_num, pg_sz, sz;
+
+ pg_sz = (size_t)1 << pg_shift;
+
+ if ((n = pg_sz / elt_sz) > 0) {
+ pg_num = (elt_num + n - 1) / n;
+ sz = pg_num << pg_shift;
+ } else {
+ sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
+ }
+
+ return (sz);
+}
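+
+/*
+ * Worked example (illustrative): with 2MB pages (pg_shift == 21) and
+ * elt_sz == 2432, n = 2097152 / 2432 = 862 elements fit per page, so
+ * elt_num == 10000 needs pg_num = (10000 + 861) / 862 = 12 pages,
+ * i.e. 24MB. The per-page rounding is what keeps elements from
+ * crossing page boundaries.
+ */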
+
+/*
+ * Calculate how much memory is actually required to store the given
+ * number of elements, taking the physical page layout into account.
+ */
+static void
+mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
+ __rte_unused uint32_t idx)
+{
+ *(uintptr_t *)arg = (uintptr_t)end;
+}
+
+ssize_t
+rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+ uint32_t n;
+ uintptr_t va, uv;
+ size_t pg_sz, usz;
+
+ pg_sz = (size_t)1 << pg_shift;
+ va = (uintptr_t)vaddr;
+ uv = va;
+
+ if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
+ paddr, pg_num, pg_shift, mempool_lelem_iter,
+ &uv)) != elt_num) {
+ return (-(ssize_t)n);
+ }
+
+ uv = RTE_ALIGN_CEIL(uv, pg_sz);
+ usz = uv - va;
+ return (usz);
+}
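+
+/*
+ * Usage sketch (illustrative): validate a pre-allocated area before
+ * populating a pool in it:
+ *
+ *   ssize_t usz = rte_mempool_xmem_usage(va, elt_num, elt_sz,
+ *           paddr, pg_num, pg_shift);
+ *
+ * A non-negative return is the number of bytes actually consumed; a
+ * negative return of -n means only n of the elt_num elements fit.
+ */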
+