/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox.
 */

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mempool.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"

struct mlx5_check_mempool_data {
	int ret;
	char *start;
	char *end;
};

/* Called by mlx5_check_mempool() when iterating the memory chunks. */
static void
mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
		      void *opaque, struct rte_mempool_memhdr *memhdr,
		      unsigned int mem_idx __rte_unused)
{
	struct mlx5_check_mempool_data *data = opaque;

	/* It already failed, skip the next chunks. */
	if (data->ret != 0)
		return;
	/* It is the first chunk. */
	if (data->start == NULL && data->end == NULL) {
		data->start = memhdr->addr;
		data->end = data->start + memhdr->len;
		return;
	}
	if (data->end == memhdr->addr) {
		/* Chunk follows the current area, extend it upwards. */
		data->end += memhdr->len;
		return;
	}
	if (data->start == (char *)memhdr->addr + memhdr->len) {
		/* Chunk precedes the current area, extend it downwards. */
		data->start -= memhdr->len;
		return;
	}
	/* Error, mempool is not virtually contiguous. */
	data->ret = -1;
}
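
/*
 * Worked example for the merge logic above (illustrative addresses
 * only): with an area currently covering [0x1000, 0x3000), a chunk at
 * 0x3000 of length 0x1000 extends the end to 0x4000, and a chunk at
 * 0x0000 of length 0x1000 pulls the start down to 0x0000; a chunk at
 * 0x8000 matches neither test and flags the mempool as not virtually
 * contiguous.
 */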

/**
 * Check if a mempool can be used: it must be virtually contiguous.
 *
 * @param[in] mp
 *   Pointer to memory pool.
 * @param[out] start
 *   Pointer to the start address of the mempool virtual memory area.
 * @param[out] end
 *   Pointer to the end address of the mempool virtual memory area.
 *
 * @return
 *   0 on success (mempool is virtually contiguous), -1 on error.
 */
static int
mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
		   uintptr_t *end)
{
	struct mlx5_check_mempool_data data;

	memset(&data, 0, sizeof(data));
	rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
	*start = (uintptr_t)data.start;
	*end = (uintptr_t)data.end;
	return data.ret;
}
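
/*
 * Usage sketch (illustration only): callers validate contiguity first
 * and then hand the resulting boundaries to the registration code:
 *
 *	uintptr_t start;
 *	uintptr_t end;
 *
 *	if (mlx5_check_mempool(mp, &start, &end) != 0)
 *		return NULL;
 *	// [start, end) now covers every chunk of the mempool.
 */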

/**
 * Register a Memory Region (MR) <-> Memory Pool (MP) association in
 * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 * @param idx
 *   Index of the next available entry.
 *
 * @return
 *   mr on success, NULL on failure.
 */
struct mlx5_mr *
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
		   unsigned int idx)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;

	rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq_ctrl, mp->name, (void *)mp);
	dev = txq_ctrl->priv->dev;
	mr = mlx5_mr_get(dev, mp);
	if (mr == NULL) {
		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
			DEBUG("Using unregistered mempool %p(%s) in "
			      "secondary process, please create mempool before"
			      " rte_eth_dev_start()",
			      (void *)mp, mp->name);
			rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
			return NULL;
		}
		mr = mlx5_mr_new(dev, mp);
	}
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq_ctrl);
		rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
		return NULL;
	}
	if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq_ctrl);
		--idx;
		mlx5_mr_release(txq->mp2mr[0]);
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq_ctrl->txq.mp2mr[idx] = mr;
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq_ctrl, mp->name, (void *)mp,
	      txq_ctrl->txq.mp2mr[idx]->lkey);
	rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
	return mr;
}
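
/*
 * A minimal sketch of the fast-path lookup that front-ends this slow
 * path (hypothetical helper name, for illustration only; the real
 * datapath lookup lives in mlx5_rxtx.h):
 *
 *	static inline uint32_t
 *	txq_mp2mr_sketch(struct mlx5_txq_data *txq, struct rte_mempool *mp)
 *	{
 *		struct mlx5_mr *mr;
 *		unsigned int i;
 *
 *		for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
 *			if (txq->mp2mr[i] == NULL)
 *				break; // Unknown MP, register a new MR.
 *			if (txq->mp2mr[i]->mp == mp)
 *				return txq->mp2mr[i]->lkey; // Cache hit.
 *		}
 *		mr = mlx5_txq_mp2mr_reg(txq, mp, i);
 *		return mr != NULL ? mr->lkey : (uint32_t)-1; // Sketch: flag failure.
 *	}
 */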

struct mlx5_mp2mr_mbuf_check_data {
	int ret;
};

/**
 * Callback function for rte_mempool_obj_iter() to check whether a given
 * mempool object looks like a mbuf.
 *
 * @param[in] mp
 *   The mempool pointer.
 * @param[in] arg
 *   Context data (struct mlx5_mp2mr_mbuf_check_data). Contains the
 *   return value.
 * @param[in] obj
 *   Object address.
 * @param index
 *   Object index, unused.
 */
static void
txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
		     uint32_t index __rte_unused)
{
	struct mlx5_mp2mr_mbuf_check_data *data = arg;
	struct rte_mbuf *buf = obj;

	/*
	 * Check whether mbuf structure fits element size and whether mempool
	 * pointer is valid.
	 */
	if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
		data->ret = -1;
}

/**
 * Iterator function for rte_mempool_walk() to register existing mempools and
 * fill the MP to MR cache of a TX queue.
 *
 * @param[in] mp
 *   Memory Pool to register.
 * @param arg
 *   Pointer to the private device structure.
 */
void
mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
	struct priv *priv = (struct priv *)arg;
	struct mlx5_mp2mr_mbuf_check_data data = {
		.ret = 0,
	};
	struct mlx5_mr *mr;

	/* Register mempool only if the first element looks like a mbuf. */
	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
			data.ret == -1)
		return;
	mr = mlx5_mr_get(priv->dev, mp);
	if (mr) {
		mlx5_mr_release(mr);
		return;
	}
	mlx5_mr_new(priv->dev, mp);
}
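
/*
 * Sketch of how this iterator is typically driven (illustration; the
 * walk happens in the device start path, outside this file):
 *
 *	rte_mempool_walk(mlx5_mp2mr_iter, priv);
 */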

/**
 * Register a new memory region from the mempool and store it in the memory
 * region list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mp
 *   Pointer to the memory pool to register.
 *
 * @return
 *   The memory region on success, NULL on failure.
 */
struct mlx5_mr *
mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	uintptr_t start;
	uintptr_t end;
	unsigned int i;
	struct mlx5_mr *mr;

	mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
	if (!mr) {
		DEBUG("unable to allocate MR, rte_zmalloc_socket() failed.");
		return NULL;
	}
	if (mlx5_check_mempool(mp, &start, &end) != 0) {
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		rte_free(mr);
		return NULL;
	}
	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Save original addresses for exact MR lookup. */
	mr->start = start;
	mr->end = end;
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		uint64_t align = ms[i].hugepage_sz;

		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
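	/*
	 * Worked example for the rounding above (illustrative numbers):
	 * with a 2 MiB hugepage segment at 0x7f0000000000 and a mempool
	 * area [0x7f00001f0000, 0x7f0000310000), start rounds down to
	 * 0x7f0000000000 and end rounds up to 0x7f0000400000, so the MR
	 * covers whole pages and any object in the area resolves to it.
	 */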
258 DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
259 (void *)mp, (void *)start, (void *)end,
260 (size_t)(end - start));
261 mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
262 IBV_ACCESS_LOCAL_WRITE);
264 mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
265 rte_atomic32_inc(&mr->refcnt);
266 DEBUG("%p: new Memory Region %p refcnt: %d", (void *)dev,
267 (void *)mr, rte_atomic32_read(&mr->refcnt));
268 LIST_INSERT_HEAD(&priv->mr, mr, next);

/**
 * Search the memory region object in the memory region list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mp
 *   Pointer to the memory pool to look up.
 *
 * @return
 *   The memory region on success, NULL if none matches.
 */
struct mlx5_mr *
mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (LIST_EMPTY(&priv->mr))
		return NULL;
	LIST_FOREACH(mr, &priv->mr, next) {
		if (mr->mp == mp) {
			rte_atomic32_inc(&mr->refcnt);
			DEBUG("Memory Region %p refcnt: %d",
			      (void *)mr, rte_atomic32_read(&mr->refcnt));
			return mr;
		}
	}
	return NULL;
}
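
/*
 * Illustration of the expected get/release pairing (sketch only): each
 * successful mlx5_mr_get() or mlx5_mr_new() takes one reference that
 * must eventually be dropped with mlx5_mr_release():
 *
 *	struct mlx5_mr *mr = mlx5_mr_get(dev, mp);
 *
 *	if (mr != NULL)
 *		mlx5_mr_release(mr); // Back to the previous refcount.
 */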

/**
 * Release the memory region object.
 *
 * @param mr
 *   Pointer to memory region to release.
 *
 * @return
 *   0 on success, EBUSY when the memory region is still referenced.
 */
int
mlx5_mr_release(struct mlx5_mr *mr)
{
	assert(mr);
	DEBUG("Memory Region %p refcnt: %d",
	      (void *)mr, rte_atomic32_read(&mr->refcnt));
	if (rte_atomic32_dec_and_test(&mr->refcnt)) {
		claim_zero(mlx5_glue->dereg_mr(mr->mr));
		LIST_REMOVE(mr, next);
		rte_free(mr);
		return 0;
	}
	return EBUSY;
}

/**
 * Verify the memory region list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_mr_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_mr *mr;

	LIST_FOREACH(mr, &priv->mr, next) {
		DEBUG("%p: mr %p still referenced", (void *)dev,
		      (void *)mr);
		++ret;
	}
	return ret;
}