/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 * Copyright(c) 2018 Solarflare Communications Inc.
 */

#include <rte_mempool.h>
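
/*
 * Compute how much memory is needed to store obj_num objects of
 * mp->header_size + mp->elt_size + mp->trailer_size bytes each.
 * A pg_shift of 0 means page boundaries are ignored and objects may
 * be packed back to back; otherwise no object may cross a
 * (1 << pg_shift)-byte page boundary. Also reports the minimum
 * usable chunk size and the required address alignment.
 */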
ssize_t
rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
				uint32_t obj_num, uint32_t pg_shift,
				size_t *min_chunk_size, size_t *align)
{
	size_t total_elt_sz;
	size_t obj_per_page, pg_sz, objs_in_last_page;
	ssize_t mem_size;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
	if (total_elt_sz == 0) {
		mem_size = 0;
	} else if (pg_shift == 0) {
		mem_size = total_elt_sz * obj_num;
	} else {
		pg_sz = (size_t)1 << pg_shift;
		obj_per_page = pg_sz / total_elt_sz;
		if (obj_per_page == 0) {
			/*
			 * Note that if object size is bigger than page size,
			 * then it is assumed that pages are grouped in subsets
			 * of physically contiguous pages big enough to store
			 * at least one object.
			 */
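			/*
			 * E.g. (hypothetical numbers): a 5000-byte element
			 * on 4 KiB pages gives obj_per_page == 0 and rounds
			 * up to 8192 bytes, i.e. two contiguous pages per
			 * object.
			 */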
			mem_size =
				RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
		} else {
			/* In the best case, the allocator will return a
			 * page-aligned address. For example, with 5 objs,
			 * the required space is as below:
			 *  |     page0     |     page1     |page2 (last)|
			 *  |obj0 |obj1 |xxx|obj2 |obj3 |xxx|obj4|
			 *  <------------- mem_size ------------->
			 */
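			/*
			 * Worked example (hypothetical numbers, matching the
			 * figure above): with 4 KiB pages (pg_shift = 12) and
			 * total_elt_sz = 1536, obj_per_page = 2. For
			 * obj_num = 5, the last page holds
			 * ((5 - 1) % 2) + 1 = 1 object, so mem_size =
			 * 1 * 1536 + (((5 - 1) / 2) << 12) = 9728 bytes,
			 * plus the worst-case margin of 1535 bytes added
			 * below.
			 */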
			objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
			/* room required for the last page */
			mem_size = objs_in_last_page * total_elt_sz;
			/* room required for other pages */
			mem_size += ((obj_num - objs_in_last_page) /
				obj_per_page) << pg_shift;

			/* In the worst case, the allocator returns a
			 * non-aligned pointer, wasting up to
			 * total_elt_sz. Add a margin for that.
			 */
			mem_size += total_elt_sz - 1;
		}
	}

	*min_chunk_size = total_elt_sz;
	*align = RTE_CACHE_LINE_SIZE;

	return mem_size;
}
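
/*
 * Default calc_mem_size callback: no driver-specific constraints,
 * so simply delegate to the helper above.
 */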
ssize_t
rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
				uint32_t obj_num, uint32_t pg_shift,
				size_t *min_chunk_size, size_t *align)
{
	return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
						min_chunk_size, align);
}
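
/*
 * Walk the chunk [vaddr, vaddr + len) and carve out up to max_objs
 * objects: report each one through obj_cb (with its IOVA, or
 * RTE_BAD_IOVA if the chunk has none) and enqueue it into the pool.
 * Returns the number of objects populated.
 */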
int
rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int max_objs,
			void *vaddr, rte_iova_t iova, size_t len,
			rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	size_t total_elt_sz;
	size_t off;
	unsigned int i;
	void *obj;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
		off += mp->header_size;
		obj = (char *)vaddr + off;
		obj_cb(mp, obj_cb_arg, obj,
		       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
		rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
		off += mp->elt_size + mp->trailer_size;
	}

	return i;
}
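
/*
 * Default populate callback: no driver-specific placement, so reuse
 * the helper unchanged.
 */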
int
rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
				void *vaddr, rte_iova_t iova, size_t len,
				rte_mempool_populate_obj_cb_t *obj_cb,
				void *obj_cb_arg)
{
	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
					len, obj_cb, obj_cb_arg);
}
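
/*
 * A minimal sketch of how a driver typically wires these defaults into
 * its ops table; the "example_*" names are hypothetical, everything
 * else is the regular mempool ops API:
 *
 *	static const struct rte_mempool_ops example_ops = {
 *		.name = "example",
 *		.alloc = example_alloc,
 *		.enqueue = example_enqueue,
 *		.dequeue = example_dequeue,
 *		.get_count = example_get_count,
 *		.calc_mem_size = rte_mempool_op_calc_mem_size_default,
 *		.populate = rte_mempool_op_populate_default,
 *	};
 *	MEMPOOL_REGISTER_OPS(example_ops);
 */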