mempool: introduce helpers for populate and required size
lib/librte_mempool/rte_mempool_ops_default.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 * Copyright(c) 2018 Solarflare Communications Inc.
 */

#include <rte_mempool.h>

ssize_t
rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
                                uint32_t obj_num, uint32_t pg_shift,
                                size_t *min_chunk_size, size_t *align)
{
        size_t total_elt_sz;
        size_t obj_per_page, pg_sz, objs_in_last_page;
        size_t mem_size;

        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
        if (total_elt_sz == 0) {
                mem_size = 0;
        } else if (pg_shift == 0) {
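                /* No page boundaries to respect: objects can be laid
                 * out back to back in one contiguous zone.
                 */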
                mem_size = total_elt_sz * obj_num;
        } else {
                pg_sz = (size_t)1 << pg_shift;
                obj_per_page = pg_sz / total_elt_sz;
                if (obj_per_page == 0) {
                        /*
                         * Note that if the object size is bigger than the
                         * page size, it is assumed that pages are grouped
                         * in subsets of physically contiguous pages big
                         * enough to store at least one object.
                         */
                        mem_size =
                                RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
                } else {
                        /* In the best case, the allocator will return a
                         * page-aligned address. For example, with 5 objs,
                         * the required space is as below:
                         *  |     page0     |     page1     |  page2 (last) |
                         *  |obj0 |obj1 |xxx|obj2 |obj3 |xxx|obj4|
                         *  <------------- mem_size ------------->
                         */
                        objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
                        /* room required for the last page */
                        mem_size = objs_in_last_page * total_elt_sz;
                        /* room required for other pages */
                        mem_size += ((obj_num - objs_in_last_page) /
                                obj_per_page) << pg_shift;

                        /* In the worst case, the allocator returns a
                         * non-aligned pointer, wasting up to
                         * total_elt_sz. Add a margin for that.
                         */
                        mem_size += total_elt_sz - 1;
                }
        }

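        /* The smallest usable chunk must hold one full object, and
         * cache-line alignment is sufficient since the worst-case
         * misalignment margin is already included above.
         */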
        *min_chunk_size = total_elt_sz;
        *align = RTE_CACHE_LINE_SIZE;

        return mem_size;
}
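
/*
 * A minimal sketch of how a driver-specific calc_mem_size callback
 * could reuse the helper above while adding its own constraint. The
 * "example_" name and the 2 MB alignment requirement are illustrative
 * assumptions, not taken from any real driver.
 */
#if 0
static ssize_t
example_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
                uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
        ssize_t mem_size;

        /* Let the helper do the generic size arithmetic. */
        mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
                                                min_chunk_size, align);
        if (mem_size < 0)
                return mem_size;

        /* Tighten the alignment for a hypothetical HW requirement. */
        *align = RTE_MAX(*align, (size_t)RTE_PGSIZE_2M);

        return mem_size;
}
#endif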

ssize_t
rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                                uint32_t obj_num, uint32_t pg_shift,
                                size_t *min_chunk_size, size_t *align)
{
        return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
                                                min_chunk_size, align);
}

int
rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int max_objs,
                        void *vaddr, rte_iova_t iova, size_t len,
                        rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        size_t total_elt_sz;
        size_t off;
        unsigned int i;
        void *obj;

        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

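        /* Carve objects out of the chunk back to back. Each slot is
         * header + element + trailer; the pointer passed to the
         * callback and enqueued below is the element itself, i.e. the
         * address just past the header.
         */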
        for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
                off += mp->header_size;
                obj = (char *)vaddr + off;
                obj_cb(mp, obj_cb_arg, obj,
                       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
                rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
                off += mp->elt_size + mp->trailer_size;
        }

        return i;
}
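
/*
 * A minimal sketch of a driver populate callback built on the helper
 * above. The "example_" name and the per-chunk setup hook are
 * illustrative assumptions, not taken from any real driver.
 */
#if 0
static int
example_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
                rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        /* A real driver could program HW with the chunk here before
         * letting the helper enqueue the objects one by one.
         */
        return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
                                        len, obj_cb, obj_cb_arg);
}
#endif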

int
rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
                                void *vaddr, rte_iova_t iova, size_t len,
                                rte_mempool_populate_obj_cb_t *obj_cb,
                                void *obj_cb_arg)
{
        return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
                                        len, obj_cb, obj_cb_arg);
}
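
/*
 * A minimal sketch of how the defaults above slot into a driver's
 * rte_mempool_ops. Only calc_mem_size and populate are shown; the
 * mandatory alloc/free/enqueue/dequeue/get_count callbacks are elided,
 * and the "example" name is a made-up assumption.
 */
#if 0
static const struct rte_mempool_ops example_ops = {
        .name = "example",
        /* .alloc/.free/.enqueue/.dequeue/.get_count omitted here */
        .calc_mem_size = rte_mempool_op_calc_mem_size_default,
        .populate = rte_mempool_op_populate_default,
};

MEMPOOL_REGISTER_OPS(example_ops);
#endif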