/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <stdio.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_mempool.h>
13 common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
16 return rte_ring_mp_enqueue_bulk(mp->pool_data,
17 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
21 common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
24 return rte_ring_sp_enqueue_bulk(mp->pool_data,
25 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
29 rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
32 return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
33 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
37 hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
40 return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
41 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
45 common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
47 return rte_ring_mc_dequeue_bulk(mp->pool_data,
48 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
52 common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
54 return rte_ring_sc_dequeue_bulk(mp->pool_data,
55 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
59 rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
61 return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
62 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
66 hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
68 return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
69 obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
73 common_ring_get_count(const struct rte_mempool *mp)
75 return rte_ring_count(mp->pool_data);
79 ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
82 char rg_name[RTE_RING_NAMESIZE];
85 ret = snprintf(rg_name, sizeof(rg_name),
86 RTE_MEMPOOL_MZ_FORMAT, mp->name);
87 if (ret < 0 || ret >= (int)sizeof(rg_name)) {
88 rte_errno = ENAMETOOLONG;
93 * Allocate the ring that will be used to store objects.
94 * Ring functions will return appropriate errors if we are
95 * running as a secondary process etc., so no checks made
96 * in this function for that condition.
98 r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
99 mp->socket_id, rg_flags);
109 common_ring_alloc(struct rte_mempool *mp)
111 uint32_t rg_flags = 0;
113 if (mp->flags & MEMPOOL_F_SP_PUT)
114 rg_flags |= RING_F_SP_ENQ;
115 if (mp->flags & MEMPOOL_F_SC_GET)
116 rg_flags |= RING_F_SC_DEQ;
118 return ring_alloc(mp, rg_flags);
122 rts_ring_alloc(struct rte_mempool *mp)
124 return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
128 hts_ring_alloc(struct rte_mempool *mp)
130 return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
134 common_ring_free(struct rte_mempool *mp)
136 rte_ring_free(mp->pool_data);
140 * The following 4 declarations of mempool ops structs address
141 * the need for the backward compatible mempool handlers for
142 * single/multi producers and single/multi consumers as dictated by the
143 * flags provided to the rte_mempool_create function
145 static const struct rte_mempool_ops ops_mp_mc = {
146 .name = "ring_mp_mc",
147 .alloc = common_ring_alloc,
148 .free = common_ring_free,
149 .enqueue = common_ring_mp_enqueue,
150 .dequeue = common_ring_mc_dequeue,
151 .get_count = common_ring_get_count,
154 static const struct rte_mempool_ops ops_sp_sc = {
155 .name = "ring_sp_sc",
156 .alloc = common_ring_alloc,
157 .free = common_ring_free,
158 .enqueue = common_ring_sp_enqueue,
159 .dequeue = common_ring_sc_dequeue,
160 .get_count = common_ring_get_count,
163 static const struct rte_mempool_ops ops_mp_sc = {
164 .name = "ring_mp_sc",
165 .alloc = common_ring_alloc,
166 .free = common_ring_free,
167 .enqueue = common_ring_mp_enqueue,
168 .dequeue = common_ring_sc_dequeue,
169 .get_count = common_ring_get_count,
172 static const struct rte_mempool_ops ops_sp_mc = {
173 .name = "ring_sp_mc",
174 .alloc = common_ring_alloc,
175 .free = common_ring_free,
176 .enqueue = common_ring_sp_enqueue,
177 .dequeue = common_ring_mc_dequeue,
178 .get_count = common_ring_get_count,
181 /* ops for mempool with ring in MT_RTS sync mode */
182 static const struct rte_mempool_ops ops_mt_rts = {
183 .name = "ring_mt_rts",
184 .alloc = rts_ring_alloc,
185 .free = common_ring_free,
186 .enqueue = rts_ring_mp_enqueue,
187 .dequeue = rts_ring_mc_dequeue,
188 .get_count = common_ring_get_count,
191 /* ops for mempool with ring in MT_HTS sync mode */
192 static const struct rte_mempool_ops ops_mt_hts = {
193 .name = "ring_mt_hts",
194 .alloc = hts_ring_alloc,
195 .free = common_ring_free,
196 .enqueue = hts_ring_mp_enqueue,
197 .dequeue = hts_ring_mc_dequeue,
198 .get_count = common_ring_get_count,
/* register all handlers so rte_mempool_set_ops_byname() can find them */
MEMPOOL_REGISTER_OPS(ops_mp_mc);
MEMPOOL_REGISTER_OPS(ops_sp_sc);
MEMPOOL_REGISTER_OPS(ops_mp_sc);
MEMPOOL_REGISTER_OPS(ops_sp_mc);
MEMPOOL_REGISTER_OPS(ops_mt_rts);
MEMPOOL_REGISTER_OPS(ops_mt_hts);