/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include <rte_mempool.h>
#include <rte_errno.h>
13 /* indirect jump table to support external memory pools. */
14 struct rte_mempool_ops_table rte_mempool_ops_table = {
15 .sl = RTE_SPINLOCK_INITIALIZER,
19 /* add a new ops struct in rte_mempool_ops_table, return its index. */
21 rte_mempool_register_ops(const struct rte_mempool_ops *h)
23 struct rte_mempool_ops *ops;
26 rte_spinlock_lock(&rte_mempool_ops_table.sl);
28 if (rte_mempool_ops_table.num_ops >=
29 RTE_MEMPOOL_MAX_OPS_IDX) {
30 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
32 "Maximum number of mempool ops structs exceeded\n");
36 if (h->alloc == NULL || h->enqueue == NULL ||
37 h->dequeue == NULL || h->get_count == NULL) {
38 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
40 "Missing callback while registering mempool ops\n");
44 if (strlen(h->name) >= sizeof(ops->name) - 1) {
45 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
46 RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
52 ops_index = rte_mempool_ops_table.num_ops++;
53 ops = &rte_mempool_ops_table.ops[ops_index];
54 snprintf(ops->name, sizeof(ops->name), "%s", h->name);
55 ops->alloc = h->alloc;
57 ops->enqueue = h->enqueue;
58 ops->dequeue = h->dequeue;
59 ops->get_count = h->get_count;
60 ops->register_memory_area = h->register_memory_area;
61 ops->calc_mem_size = h->calc_mem_size;
62 ops->populate = h->populate;
64 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
69 /* wrapper to allocate an external mempool's private (pool) data. */
71 rte_mempool_ops_alloc(struct rte_mempool *mp)
73 struct rte_mempool_ops *ops;
75 ops = rte_mempool_get_ops(mp->ops_index);
76 return ops->alloc(mp);
79 /* wrapper to free an external pool ops. */
81 rte_mempool_ops_free(struct rte_mempool *mp)
83 struct rte_mempool_ops *ops;
85 ops = rte_mempool_get_ops(mp->ops_index);
86 if (ops->free == NULL)
91 /* wrapper to get available objects in an external mempool. */
93 rte_mempool_ops_get_count(const struct rte_mempool *mp)
95 struct rte_mempool_ops *ops;
97 ops = rte_mempool_get_ops(mp->ops_index);
98 return ops->get_count(mp);
101 /* wrapper to notify new memory area to external mempool */
103 rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
104 rte_iova_t iova, size_t len)
106 struct rte_mempool_ops *ops;
108 ops = rte_mempool_get_ops(mp->ops_index);
110 RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);
111 return ops->register_memory_area(mp, vaddr, iova, len);
114 /* wrapper to notify new memory area to external mempool */
116 rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
117 uint32_t obj_num, uint32_t pg_shift,
118 size_t *min_chunk_size, size_t *align)
120 struct rte_mempool_ops *ops;
122 ops = rte_mempool_get_ops(mp->ops_index);
124 if (ops->calc_mem_size == NULL)
125 return rte_mempool_op_calc_mem_size_default(mp, obj_num,
126 pg_shift, min_chunk_size, align);
128 return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
131 /* wrapper to populate memory pool objects using provided memory chunk */
133 rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
134 void *vaddr, rte_iova_t iova, size_t len,
135 rte_mempool_populate_obj_cb_t *obj_cb,
138 struct rte_mempool_ops *ops;
140 ops = rte_mempool_get_ops(mp->ops_index);
142 if (ops->populate == NULL)
143 return rte_mempool_op_populate_default(mp, max_objs, vaddr,
147 return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
151 /* sets mempool ops previously registered by rte_mempool_register_ops. */
153 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
156 struct rte_mempool_ops *ops = NULL;
159 /* too late, the mempool is already populated. */
160 if (mp->flags & MEMPOOL_F_POOL_CREATED)
163 for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
165 rte_mempool_ops_table.ops[i].name)) {
166 ops = &rte_mempool_ops_table.ops[i];
175 mp->pool_config = pool_config;