/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 * Copyright(c) 2018 Solarflare Communications Inc.
 */

#include <rte_mempool.h>
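
/*
 * Compute how much memory is needed to store obj_num objects, each taking
 * header_size + elt_size + trailer_size bytes, with no object crossing a
 * boundary of (1 << pg_shift) bytes. chunk_reserve bytes are kept free at
 * the start of each page (or of the whole area when pg_shift is 0). On
 * success, returns the required size and fills *min_chunk_size and *align
 * with the minimum usable chunk size and the required chunk alignment.
 */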
ssize_t
rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
				uint32_t obj_num, uint32_t pg_shift,
				size_t chunk_reserve,
				size_t *min_chunk_size, size_t *align)
{
	size_t total_elt_sz;
	size_t obj_per_page, pg_sz, objs_in_last_page;
	size_t mem_size;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
	if (total_elt_sz == 0) {
		mem_size = 0;
	} else if (pg_shift == 0) {
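		/* pg_shift == 0: memory chunks may be of any size, so no
		 * page-boundary constraint applies and objects are packed
		 * back to back after the reserved area.
		 */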
		mem_size = total_elt_sz * obj_num + chunk_reserve;
	} else {
		pg_sz = (size_t)1 << pg_shift;
		if (chunk_reserve >= pg_sz)
			return -EINVAL;
		obj_per_page = (pg_sz - chunk_reserve) / total_elt_sz;
		if (obj_per_page == 0) {
			/*
			 * Note that if the object size is bigger than the
			 * page size, it is assumed that pages are grouped
			 * in subsets of physically contiguous pages big
			 * enough to store at least one object.
			 */
			mem_size = RTE_ALIGN_CEIL(total_elt_sz + chunk_reserve,
						pg_sz) * obj_num;
		} else {
			/* In the best case, the allocator will return a
			 * page-aligned address. For example, with 5 objs,
			 * the required space is as below:
			 *  |     page0     |     page1     |  page2 (last) |
			 *  |obj0 |obj1 |xxx|obj2 |obj3 |xxx|obj4|
			 *  <------------- mem_size ------------->
			 */
			objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
			/* room required for the last page */
			mem_size = objs_in_last_page * total_elt_sz +
				chunk_reserve;
			/* room required for other pages */
			mem_size += ((obj_num - objs_in_last_page) /
				obj_per_page) << pg_shift;

			/* In the worst case, the allocator returns a
			 * non-aligned pointer, wasting up to
			 * total_elt_sz. Add a margin for that.
			 */
			mem_size += total_elt_sz - 1;
		}
	}

	*min_chunk_size = total_elt_sz;
	*align = RTE_MEMPOOL_ALIGN;

	return mem_size;
}
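
/*
 * Illustrative example (numbers are ours, not from the code above): with
 * total_elt_sz = 1000, pg_sz = 4096 and chunk_reserve = 0, four objects
 * fit per page. For obj_num = 5, one object lands in the last page, so
 * mem_size = 1 * 1000 + 1 * 4096 + 999 = 6095 bytes.
 */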

ssize_t
rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
				uint32_t obj_num, uint32_t pg_shift,
				size_t *min_chunk_size, size_t *align)
{
	return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
						0, min_chunk_size, align);
}

/* Returns -1 if the object crosses a page boundary, else returns 0 */
static int
check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)
{
	if (pg_sz == 0)
		return 0;
	if (elt_sz > pg_sz)
		return 0;
	if (RTE_PTR_ALIGN(obj, pg_sz) != RTE_PTR_ALIGN(obj + elt_sz - 1, pg_sz))
		return -1;
	return 0;
}
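
/*
 * Populate the mempool with objects laid out in the virtual area
 * [vaddr, vaddr + len). Objects are placed so that none crosses a page
 * boundary; with RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ in flags, each object
 * additionally starts on a multiple of its total size. Returns the number
 * of objects enqueued, which may be less than max_objs if len is too
 * small.
 */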
int
rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int flags,
			unsigned int max_objs, void *vaddr, rte_iova_t iova,
			size_t len, rte_mempool_populate_obj_cb_t *obj_cb,
			void *obj_cb_arg)
{
	char *va = vaddr;
	size_t total_elt_sz, pg_sz;
	size_t off;
	unsigned int i;
	void *obj;
	int ret;

	ret = rte_mempool_get_page_size(mp, &pg_sz);
	if (ret < 0)
		return ret;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

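	/* With RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ, start at the next multiple
	 * of total_elt_sz; the (va - 1) form keeps off at 0 when va is
	 * already aligned.
	 */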
	if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
		off = total_elt_sz - (((uintptr_t)(va - 1) % total_elt_sz) + 1);
	else
		off = 0;
	for (i = 0; i < max_objs; i++) {
		/* keep objects from crossing page boundaries */
		if (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0) {
			off += RTE_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + off);
			if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
				off += total_elt_sz -
					(((uintptr_t)(va + off - 1) %
						total_elt_sz) + 1);
		}

		if (off + total_elt_sz > len)
			break;
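
		/* The object fits: skip the header so obj points at the
		 * element itself, report it to the callback, then hand it
		 * to the driver's enqueue.
		 */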
		off += mp->header_size;
		obj = va + off;
		obj_cb(mp, obj_cb_arg, obj,
		       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
		rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
		off += mp->elt_size + mp->trailer_size;
	}

	return i;
}
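
/*
 * Usage sketch (hypothetical driver, names are ours): a driver with no
 * special layout constraints can point its ops at these defaults, or
 * leave the fields NULL so the mempool library falls back to them:
 *
 *	static const struct rte_mempool_ops my_ops = {
 *		.name = "my_driver",
 *		...
 *		.calc_mem_size = rte_mempool_op_calc_mem_size_default,
 *		.populate = rte_mempool_op_populate_default,
 *	};
 */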

int
rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
				void *vaddr, rte_iova_t iova, size_t len,
				rte_mempool_populate_obj_cb_t *obj_cb,
				void *obj_cb_arg)
{
	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
					len, obj_cb, obj_cb_arg);
}