4 * Copyright(c) 2016 Intel Corporation. All rights reserved.
5 * Copyright(c) 2016 6WIND S.A.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <string.h>

#include <rte_mempool.h>
#include <rte_errno.h>
42 /* indirect jump table to support external memory pools. */
43 struct rte_mempool_ops_table rte_mempool_ops_table = {
44 .sl = RTE_SPINLOCK_INITIALIZER,
48 /* add a new ops struct in rte_mempool_ops_table, return its index. */
50 rte_mempool_register_ops(const struct rte_mempool_ops *h)
52 struct rte_mempool_ops *ops;
55 rte_spinlock_lock(&rte_mempool_ops_table.sl);
57 if (rte_mempool_ops_table.num_ops >=
58 RTE_MEMPOOL_MAX_OPS_IDX) {
59 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
61 "Maximum number of mempool ops structs exceeded\n");
65 if (h->alloc == NULL || h->enqueue == NULL ||
66 h->dequeue == NULL || h->get_count == NULL) {
67 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
69 "Missing callback while registering mempool ops\n");
73 if (strlen(h->name) >= sizeof(ops->name) - 1) {
74 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
75 RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
81 ops_index = rte_mempool_ops_table.num_ops++;
82 ops = &rte_mempool_ops_table.ops[ops_index];
83 snprintf(ops->name, sizeof(ops->name), "%s", h->name);
84 ops->alloc = h->alloc;
86 ops->enqueue = h->enqueue;
87 ops->dequeue = h->dequeue;
88 ops->get_count = h->get_count;
89 ops->get_capabilities = h->get_capabilities;
90 ops->register_memory_area = h->register_memory_area;
92 rte_spinlock_unlock(&rte_mempool_ops_table.sl);
97 /* wrapper to allocate an external mempool's private (pool) data. */
99 rte_mempool_ops_alloc(struct rte_mempool *mp)
101 struct rte_mempool_ops *ops;
103 ops = rte_mempool_get_ops(mp->ops_index);
104 return ops->alloc(mp);
107 /* wrapper to free an external pool ops. */
109 rte_mempool_ops_free(struct rte_mempool *mp)
111 struct rte_mempool_ops *ops;
113 ops = rte_mempool_get_ops(mp->ops_index);
114 if (ops->free == NULL)
119 /* wrapper to get available objects in an external mempool. */
121 rte_mempool_ops_get_count(const struct rte_mempool *mp)
123 struct rte_mempool_ops *ops;
125 ops = rte_mempool_get_ops(mp->ops_index);
126 return ops->get_count(mp);
129 /* wrapper to get external mempool capabilities. */
131 rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
134 struct rte_mempool_ops *ops;
136 ops = rte_mempool_get_ops(mp->ops_index);
138 RTE_FUNC_PTR_OR_ERR_RET(ops->get_capabilities, -ENOTSUP);
139 return ops->get_capabilities(mp, flags);
142 /* wrapper to notify new memory area to external mempool */
144 rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
145 rte_iova_t iova, size_t len)
147 struct rte_mempool_ops *ops;
149 ops = rte_mempool_get_ops(mp->ops_index);
151 RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);
152 return ops->register_memory_area(mp, vaddr, iova, len);
155 /* sets mempool ops previously registered by rte_mempool_register_ops. */
157 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
160 struct rte_mempool_ops *ops = NULL;
163 /* too late, the mempool is already populated. */
164 if (mp->flags & MEMPOOL_F_POOL_CREATED)
167 for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
169 rte_mempool_ops_table.ops[i].name)) {
170 ops = &rte_mempool_ops_table.ops[i];
179 mp->pool_config = pool_config;