mempool: prevent objects from being across pages
lib/librte_mempool/rte_mempool_ops_default.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 * Copyright(c) 2018 Solarflare Communications Inc.
 */

#include <rte_mempool.h>

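/*
 * Helper to compute the amount of memory needed to store obj_num objects
 * of total element size (header + elt + trailer) when pages are 2^pg_shift
 * bytes large and chunk_reserve extra bytes are kept in each page/chunk.
 * A pg_shift of 0 means there is no page-size constraint.
 */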
ssize_t
rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
                                uint32_t obj_num, uint32_t pg_shift,
                                size_t chunk_reserve,
                                size_t *min_chunk_size, size_t *align)
{
        size_t total_elt_sz;
        size_t obj_per_page, pg_sz, objs_in_last_page;
        size_t mem_size;

        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
        if (total_elt_sz == 0) {
                mem_size = 0;
        } else if (pg_shift == 0) {
                mem_size = total_elt_sz * obj_num + chunk_reserve;
        } else {
                pg_sz = (size_t)1 << pg_shift;
                if (chunk_reserve >= pg_sz)
                        return -EINVAL;
                obj_per_page = (pg_sz - chunk_reserve) / total_elt_sz;
                if (obj_per_page == 0) {
                        /*
                         * Note that if the object size is bigger than the
                         * page size, then it is assumed that pages are
                         * grouped in subsets of physically contiguous pages
                         * big enough to store at least one object.
                         */
                        mem_size = RTE_ALIGN_CEIL(total_elt_sz + chunk_reserve,
                                                pg_sz) * obj_num;
                } else {
                        /* In the best case, the allocator will return a
                         * page-aligned address. For example, with 5 objs,
                         * the required space is as below:
                         *  |     page0     |     page1     |  page2 (last) |
                         *  |obj0 |obj1 |xxx|obj2 |obj3 |xxx|obj4|
                         *  <------------- mem_size ------------->
                         */
                        objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
                        /* room required for the last page */
                        mem_size = objs_in_last_page * total_elt_sz +
                                chunk_reserve;
                        /* room required for other pages */
                        mem_size += ((obj_num - objs_in_last_page) /
                                obj_per_page) << pg_shift;

                        /* In the worst case, the allocator returns a
                         * non-aligned pointer, wasting up to
                         * total_elt_sz. Add a margin for that.
                         */
                        mem_size += total_elt_sz - 1;
                }
        }

        *min_chunk_size = total_elt_sz;
        *align = RTE_CACHE_LINE_SIZE;

        return mem_size;
}

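/*
 * Default ops callback for calc_mem_size: same computation as the helper
 * above, with no extra per-chunk reservation.
 */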
ssize_t
rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                                uint32_t obj_num, uint32_t pg_shift,
                                size_t *min_chunk_size, size_t *align)
{
        return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
                                                0, min_chunk_size, align);
}

/* Returns -1 if object crosses a page boundary, else returns 0 */
static int
check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)
{
        if (pg_sz == 0)
                return 0;
        if (elt_sz > pg_sz)
                return 0;
        if (RTE_PTR_ALIGN(obj, pg_sz) != RTE_PTR_ALIGN(obj + elt_sz - 1, pg_sz))
                return -1;
        return 0;
}

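/*
 * Populate the area [vaddr, vaddr + len) with objects, one every total
 * element size. When pg_sz != 0, an object that would cross a page
 * boundary is moved to the start of the next page; when
 * RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set, objects are additionally
 * aligned on a multiple of the total element size. Each object is passed
 * to obj_cb and enqueued in the pool; the number of objects populated is
 * returned.
 */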
int
rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int flags,
                        unsigned int max_objs, void *vaddr, rte_iova_t iova,
                        size_t len, rte_mempool_populate_obj_cb_t *obj_cb,
                        void *obj_cb_arg)
{
        char *va = vaddr;
        size_t total_elt_sz, pg_sz;
        size_t off;
        unsigned int i;
        void *obj;
        int ret;

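        /* a page size of 0 means objects need not fit within a page */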
        ret = rte_mempool_get_page_size(mp, &pg_sz);
        if (ret < 0)
                return ret;

        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

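        /*
         * When object alignment is requested, start at the next address
         * that is a multiple of total_elt_sz; the (va - 1) form keeps the
         * offset at 0 when va is already aligned.
         */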
        if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
                off = total_elt_sz - (((uintptr_t)(va - 1) % total_elt_sz) + 1);
        else
                off = 0;
        for (i = 0; i < max_objs; i++) {
                /* avoid objects crossing page boundaries */
                if (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0) {
                        off += RTE_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + off);
                        if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
                                off += total_elt_sz -
                                        (((uintptr_t)(va + off - 1) %
                                                total_elt_sz) + 1);
                }

                if (off + total_elt_sz > len)
                        break;

                off += mp->header_size;
                obj = va + off;
                obj_cb(mp, obj_cb_arg, obj,
                       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
                rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
                off += mp->elt_size + mp->trailer_size;
        }

        return i;
}

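/*
 * Default ops callback for populate: no extra alignment constraint on
 * the objects.
 */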
int
rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
                                void *vaddr, rte_iova_t iova, size_t len,
                                rte_mempool_populate_obj_cb_t *obj_cb,
                                void *obj_cb_arg)
{
        return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
                                        len, obj_cb, obj_cb_arg);
}