mempool: introduce helpers for populate and required size
diff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c
index 4e2bfc8..0bfc634 100644
@@ -7,12 +7,12 @@
 #include <rte_mempool.h>
 
 ssize_t
-rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
-                                    uint32_t obj_num, uint32_t pg_shift,
-                                    size_t *min_chunk_size, size_t *align)
+rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
+                               uint32_t obj_num, uint32_t pg_shift,
+                               size_t *min_chunk_size, size_t *align)
 {
        size_t total_elt_sz;
-       size_t obj_per_page, pg_num, pg_sz;
+       size_t obj_per_page, pg_sz, objs_in_last_page;
        size_t mem_size;
 
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
@@ -33,22 +33,47 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                        mem_size =
                                RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
                } else {
-                       pg_num = (obj_num + obj_per_page - 1) / obj_per_page;
-                       mem_size = pg_num << pg_shift;
+                       /* In the best case, the allocator will return a
+                        * page-aligned address. For example, with 5 objs,
+                        * the required space is as below:
+                        *  |     page0     |     page1     |  page2 (last) |
+                        *  |obj0 |obj1 |xxx|obj2 |obj3 |xxx|obj4|
+                        *  <------------- mem_size ------------->
+                        */
+                       objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
+                       /* room required for the last page */
+                       mem_size = objs_in_last_page * total_elt_sz;
+                       /* room required for other pages */
+                       mem_size += ((obj_num - objs_in_last_page) /
+                               obj_per_page) << pg_shift;
+
+                       /* In the worst case, the allocator returns a
+                        * non-aligned pointer, wasting up to
+                        * total_elt_sz. Add a margin for that.
+                        */
+                       mem_size += total_elt_sz - 1;
                }
        }
 
-       *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
-
-       *align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift);
+       *min_chunk_size = total_elt_sz;
+       *align = RTE_CACHE_LINE_SIZE;
 
        return mem_size;
 }
 
+ssize_t
+rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+                               uint32_t obj_num, uint32_t pg_shift,
+                               size_t *min_chunk_size, size_t *align)
+{
+       return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+                                               min_chunk_size, align);
+}
+
 int
-rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
-               void *vaddr, rte_iova_t iova, size_t len,
-               rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int max_objs,
+                       void *vaddr, rte_iova_t iova, size_t len,
+                       rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 {
        size_t total_elt_sz;
        size_t off;
@@ -68,3 +93,13 @@ rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
 
        return i;
 }
+
+int
+rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
+                               void *vaddr, rte_iova_t iova, size_t len,
+                               rte_mempool_populate_obj_cb_t *obj_cb,
+                               void *obj_cb_arg)
+{
+       return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
+                                       len, obj_cb, obj_cb_arg);
+}
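
To make the best-case/worst-case sizing arithmetic above concrete, here is a small standalone check using assumed example values (4 KiB pages, a 1536-byte total element size, 5 objects, matching the 5-obj diagram in the comment). It only mirrors the helper's computation; the numbers and the program itself are illustrative, not part of the patch.

/* Worked example of the sizing arithmetic (assumed values, not DPDK code). */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int pg_shift = 12;                 /* assumed: 4 KiB pages */
	size_t pg_sz = (size_t)1 << pg_shift;
	size_t total_elt_sz = 1536;                 /* assumed header + elt + trailer */
	unsigned int obj_num = 5;                   /* as in the 5-obj diagram */

	size_t obj_per_page = pg_sz / total_elt_sz;                    /* 2 */
	size_t objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1; /* 1 */

	size_t mem_size = objs_in_last_page * total_elt_sz;            /* 1536 */
	mem_size += ((obj_num - objs_in_last_page) / obj_per_page) << pg_shift;
	                                            /* + 2 full pages = 9728 */
	mem_size += total_elt_sz - 1;               /* worst-case margin -> 11263 */

	printf("mem_size = %zu bytes\n", mem_size);
	return 0;
}

For the same case, the previous formula rounded up to 3 full pages (12288 bytes), so the new computation is tighter while still covering a non-page-aligned allocation.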
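
The point of splitting the default ops into helper/default pairs is that driver-specific mempool ops can reuse the shared logic instead of duplicating it. Below is a hedged sketch of what such reuse could look like: the driver op names and the extra chunk-size constraint are hypothetical and not part of this patch; only the two helper calls use the signatures introduced above.

#include <rte_mempool.h>

/* Hypothetical driver op: delegate sizing to the helper, then apply an
 * assumed hardware constraint on the chunk size.
 */
static ssize_t
example_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
	ssize_t mem_size;

	mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
						       min_chunk_size, align);
	if (mem_size < 0)
		return mem_size;

	/* Assumed HW requirement: chunks sized in cache-line multiples. */
	*min_chunk_size = RTE_ALIGN_CEIL(*min_chunk_size, RTE_CACHE_LINE_SIZE);

	return mem_size;
}

/* Hypothetical driver op: object placement itself is unchanged, so the
 * populate helper can be reused as-is.
 */
static int
example_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
		rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,
					      obj_cb, obj_cb_arg);
}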