/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */
#include <stdio.h>
#include <string.h>

#include <rte_mempool.h>
#include <rte_errno.h>
13 /* indirect jump table to support external memory pools. */
14 struct rte_mempool_ops_table rte_mempool_ops_table = {
15 .sl = RTE_SPINLOCK_INITIALIZER,
19 /* add a new ops struct in rte_mempool_ops_table, return its index. */
21 rte_mempool_register_ops(const struct rte_mempool_ops *h)
23 struct rte_mempool_ops *ops;
26 rte_spinlock_lock(&rte_mempool_ops_table.sl);
28 if (rte_mempool_ops_table.num_ops >=
29 RTE_MEMPOOL_MAX_OPS_IDX) {
30 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
32 "Maximum number of mempool ops structs exceeded\n");
36 if (h->alloc == NULL || h->enqueue == NULL ||
37 h->dequeue == NULL || h->get_count == NULL) {
38 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
40 "Missing callback while registering mempool ops\n");
44 if (strlen(h->name) >= sizeof(ops->name) - 1) {
45 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
46 RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
52 ops_index = rte_mempool_ops_table.num_ops++;
53 ops = &rte_mempool_ops_table.ops[ops_index];
54 snprintf(ops->name, sizeof(ops->name), "%s", h->name);
55 ops->alloc = h->alloc;
57 ops->enqueue = h->enqueue;
58 ops->dequeue = h->dequeue;
59 ops->get_count = h->get_count;
60 ops->calc_mem_size = h->calc_mem_size;
61 ops->populate = h->populate;
63 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
68 /* wrapper to allocate an external mempool's private (pool) data. */
70 rte_mempool_ops_alloc(struct rte_mempool *mp)
72 struct rte_mempool_ops *ops;
74 ops = rte_mempool_get_ops(mp->ops_index);
75 return ops->alloc(mp);
78 /* wrapper to free an external pool ops. */
80 rte_mempool_ops_free(struct rte_mempool *mp)
82 struct rte_mempool_ops *ops;
84 ops = rte_mempool_get_ops(mp->ops_index);
85 if (ops->free == NULL)
90 /* wrapper to get available objects in an external mempool. */
92 rte_mempool_ops_get_count(const struct rte_mempool *mp)
94 struct rte_mempool_ops *ops;
96 ops = rte_mempool_get_ops(mp->ops_index);
97 return ops->get_count(mp);
100 /* wrapper to notify new memory area to external mempool */
102 rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
103 uint32_t obj_num, uint32_t pg_shift,
104 size_t *min_chunk_size, size_t *align)
106 struct rte_mempool_ops *ops;
108 ops = rte_mempool_get_ops(mp->ops_index);
110 if (ops->calc_mem_size == NULL)
111 return rte_mempool_op_calc_mem_size_default(mp, obj_num,
112 pg_shift, min_chunk_size, align);
114 return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
117 /* wrapper to populate memory pool objects using provided memory chunk */
119 rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
120 void *vaddr, rte_iova_t iova, size_t len,
121 rte_mempool_populate_obj_cb_t *obj_cb,
124 struct rte_mempool_ops *ops;
126 ops = rte_mempool_get_ops(mp->ops_index);
128 if (ops->populate == NULL)
129 return rte_mempool_op_populate_default(mp, max_objs, vaddr,
133 return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
137 /* sets mempool ops previously registered by rte_mempool_register_ops. */
139 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
142 struct rte_mempool_ops *ops = NULL;
145 /* too late, the mempool is already populated. */
146 if (mp->flags & MEMPOOL_F_POOL_CREATED)
149 for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
151 rte_mempool_ops_table.ops[i].name)) {
152 ops = &rte_mempool_ops_table.ops[i];
161 mp->pool_config = pool_config;