/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mempool.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"

struct mlx5_check_mempool_data {
	int ret;
	char *start;
	char *end;
};

/* Called by mlx5_check_mempool() when iterating the memory chunks. */
static void
mlx5_check_mempool_cb(struct rte_mempool *mp,
		      void *opaque, struct rte_mempool_memhdr *memhdr,
		      unsigned int mem_idx)
{
	struct mlx5_check_mempool_data *data = opaque;

	(void)mp;
	(void)mem_idx;

	/* It already failed, skip the next chunks. */
	if (data->ret != 0)
		return;
	/* It is the first chunk. */
	if (data->start == NULL && data->end == NULL) {
		data->start = memhdr->addr;
		data->end = data->start + memhdr->len;
		return;
	}
	/* The chunk is adjacent to the end of the area, extend it forward. */
	if (data->end == memhdr->addr) {
		data->end += memhdr->len;
		return;
	}
	/* The chunk is adjacent to the start of the area, extend it backward. */
	if (data->start == (char *)memhdr->addr + memhdr->len) {
		data->start -= memhdr->len;
		return;
	}
	/* Error, mempool is not virtually contiguous. */
	data->ret = -1;
}
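
/*
 * Worked example of the contiguity check above (illustrative values):
 * if the chunks arrive as [0x2000, 0x3000), then [0x1000, 0x2000), then
 * [0x3000, 0x4000), the area grows backward to 0x1000 and forward to
 * 0x4000 and the mempool is accepted. A chunk at, say, [0x5000, 0x6000)
 * is adjacent to neither boundary, leaves a hole and sets ret to -1.
 */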

/**
 * Check if a mempool can be used: it must be virtually contiguous.
 *
 * @param[in] mp
 *   Pointer to memory pool.
 * @param[out] start
 *   Pointer to the start address of the mempool virtual memory area.
 * @param[out] end
 *   Pointer to the end address of the mempool virtual memory area.
 *
 * @return
 *   0 on success (mempool is virtually contiguous), -1 on error.
 */
static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
			      uintptr_t *end)
{
	struct mlx5_check_mempool_data data;

	memset(&data, 0, sizeof(data));
	rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
	*start = (uintptr_t)data.start;
	*end = (uintptr_t)data.end;
	return data.ret;
}
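
/*
 * Minimal usage sketch (illustrative only): callers obtain the area
 * bounds and bail out on non-contiguous pools, mirroring the call sites
 * further down in this file.
 *
 *	uintptr_t start;
 *	uintptr_t end;
 *
 *	if (mlx5_check_mempool(mp, &start, &end) != 0)
 *		return;
 */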

/**
 * Register a Memory Region (MR) <-> Memory Pool (MP) association in
 * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
 *
 * This function should only be called by txq_mp2mr().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 * @param idx
 *   Index of the next available entry.
 *
 * @return
 *   mr on success, NULL on failure.
 */
struct mlx5_mr *
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
		   unsigned int idx)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr *mr;

	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq_ctrl, mp->name, (void *)mp);
	mr = priv_mr_get(txq_ctrl->priv, mp);
	if (mr == NULL)
		mr = priv_mr_new(txq_ctrl->priv, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq_ctrl);
		return NULL;
	}
	if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq_ctrl);
		--idx;
		priv_mr_release(txq_ctrl->priv, txq->mp2mr[0]);
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq_ctrl->txq.mp2mr[idx] = mr;
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq_ctrl, mp->name, (void *)mp,
	      txq_ctrl->txq.mp2mr[idx]->lkey);
	return mr;
}
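
/*
 * For reference, the txq_mp2mr() helper mentioned above lives in
 * mlx5_rxtx.h; conceptually it scans the per-queue cache before falling
 * back to this function. Simplified sketch only, not the actual code:
 *
 *	unsigned int i;
 *	struct mlx5_mr *mr;
 *
 *	for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
 *		if (txq->mp2mr[i] == NULL)
 *			break;
 *		if (txq->mp2mr[i]->mp == mp)
 *			return txq->mp2mr[i]->lkey;
 *	}
 *	mr = mlx5_txq_mp2mr_reg(txq, mp, i);
 *	return (mr != NULL) ? mr->lkey : (uint32_t)-1;
 */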

struct txq_mp2mr_mbuf_check_data {
	int ret;
};

/**
 * Callback function for rte_mempool_obj_iter() to check whether a given
 * mempool object looks like a mbuf.
 *
 * @param[in] mp
 *   The mempool pointer.
 * @param[in] arg
 *   Context data (struct txq_mp2mr_mbuf_check_data). Contains the
 *   return value.
 * @param[in] obj
 *   Object address.
 * @param index
 *   Object index, unused.
 */
static void
txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
		     uint32_t index __rte_unused)
{
	struct txq_mp2mr_mbuf_check_data *data = arg;
	struct rte_mbuf *buf = obj;

	/*
	 * Check whether mbuf structure fits element size and whether mempool
	 * pointer is valid.
	 */
	if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
		data->ret = -1;
}

/**
 * Iterator function for rte_mempool_walk() to register existing mempools and
 * fill the MP to MR cache of a TX queue.
 *
 * @param[in] mp
 *   Memory Pool to register.
 * @param arg
 *   Pointer to TX queue structure.
 */
void
mlx5_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
	struct mlx5_txq_ctrl *txq_ctrl = arg;
	struct txq_mp2mr_mbuf_check_data data = {
		.ret = 0,
	};
	uintptr_t start;
	uintptr_t end;
	unsigned int i;

	/* Register mempool only if its elements look like mbufs. */
	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
	    data.ret == -1)
		return;
	if (mlx5_check_mempool(mp, &start, &end) != 0) {
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		return;
	}
	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
		if (unlikely(txq_ctrl->txq.mp2mr[i] == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		/* The pool is already covered by a cached MR, nothing to do. */
		if (start >= (uintptr_t)txq_ctrl->txq.mp2mr[i]->start &&
		    end <= (uintptr_t)txq_ctrl->txq.mp2mr[i]->end)
			return;
	}
	mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
}
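
/*
 * Typical call site (sketch): when a TX queue is brought up, every
 * existing mempool is fed through this iterator so the MP to MR cache
 * is warm before the first packet is transmitted:
 *
 *	rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
 */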

/**
 * Register a new memory region from the mempool and store it in the memory
 * region list.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mp
 *   Pointer to the memory pool to register.
 *
 * @return
 *   The memory region on success, NULL on failure.
 */
struct mlx5_mr *
priv_mr_new(struct priv *priv, struct rte_mempool *mp)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	uintptr_t start;
	uintptr_t end;
	unsigned int i;
	struct mlx5_mr *mr;

	mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
	if (!mr) {
		DEBUG("unable to allocate MR, rte_zmalloc_socket() failed.");
		return NULL;
	}
	if (mlx5_check_mempool(mp, &start, &end) != 0) {
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		rte_free(mr);
		return NULL;
	}
	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		unsigned int align = ms[i].hugepage_sz;

		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
			    IBV_ACCESS_LOCAL_WRITE);
	if (mr->mr == NULL) {
		ERROR("mempool %p: ibv_reg_mr() failed", (void *)mp);
		rte_free(mr);
		return NULL;
	}
	mr->mp = mp;
	mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
	mr->start = start;
	mr->end = (uintptr_t)mr->mr->addr + mr->mr->length;
	rte_atomic32_inc(&mr->refcnt);
	DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
	      (void *)mr, rte_atomic32_read(&mr->refcnt));
	LIST_INSERT_HEAD(&priv->mr, mr, next);
	return mr;
}

/**
 * Search the memory region object in the memory region list.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mp
 *   Pointer to the memory pool the memory region is associated with.
 *
 * @return
 *   The memory region if found, NULL otherwise.
 */
struct mlx5_mr *
priv_mr_get(struct priv *priv, struct rte_mempool *mp)
{
	struct mlx5_mr *mr;

	assert(mp);
	if (LIST_EMPTY(&priv->mr))
		return NULL;
	LIST_FOREACH(mr, &priv->mr, next) {
		if (mr->mp == mp) {
			rte_atomic32_inc(&mr->refcnt);
			DEBUG("Memory Region %p refcnt: %d",
			      (void *)mr, rte_atomic32_read(&mr->refcnt));
			return mr;
		}
	}
	return NULL;
}

/**
 * Release the memory region object.
 *
 * @param priv
 *   Pointer to private structure (unused).
 * @param mr
 *   Pointer to memory region to release.
 *
 * @return
 *   0 when the memory region is destroyed, EBUSY when it is still
 *   referenced.
 */
int
priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
{
	(void)priv;
	assert(mr);
	DEBUG("Memory Region %p refcnt: %d",
	      (void *)mr, rte_atomic32_read(&mr->refcnt));
	if (rte_atomic32_dec_and_test(&mr->refcnt)) {
		claim_zero(ibv_dereg_mr(mr->mr));
		LIST_REMOVE(mr, next);
		rte_free(mr);
		return 0;
	}
	return EBUSY;
}
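
/*
 * Reference-count contract (sketch): each successful priv_mr_get() or
 * priv_mr_new() takes one reference that must be balanced by exactly one
 * priv_mr_release(); the last release deregisters and frees the MR.
 *
 *	mr = priv_mr_get(priv, mp);
 *	if (mr == NULL)
 *		mr = priv_mr_new(priv, mp);
 *	if (mr == NULL)
 *		return;
 *	...
 *	priv_mr_release(priv, mr);
 */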

/**
 * Verify the memory region list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of memory regions not released.
 */
int
priv_mr_verify(struct priv *priv)
{
	int ret = 0;
	struct mlx5_mr *mr;

	LIST_FOREACH(mr, &priv->mr, next) {
		DEBUG("%p: mr %p still referenced", (void *)priv,
		      (void *)mr);
		++ret;
	}
	return ret;
}
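
/*
 * Intended use at teardown (sketch, assuming the driver's WARN() log
 * macro): a non-zero return from priv_mr_verify() means some MR
 * registrations leaked, e.g. during device close:
 *
 *	if (priv_mr_verify(priv))
 *		WARN("%p: some memory regions are still in use",
 *		     (void *)priv);
 */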