/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx5_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Callback for memory event. This can be called from both primary and secondary
 * processes.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(sh, dev_list, mem_event_cb)
			mlx5_free_mr_by_addr(&sh->share_cache,
					     sh->ibdev_name, addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

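/*
 * Illustrative sketch, not part of the driver build: how a primary process
 * would hook a callback like mlx5_mr_mem_event_cb() into the EAL memory
 * event notifications. The guard macro, the function name, and the callback
 * name string are hypothetical; the real registration point of the mlx5 PMD
 * lives in its shared-data initialization path.
 */
#ifdef MLX5_MR_DOC_EXAMPLES
#include <rte_memory.h>

static int
mlx5_mr_example_register_mem_event_cb(void)
{
	/* The EAL invokes the callback on every hugepage alloc/free event. */
	return rte_mem_event_callback_register("mlx5-mr-example",
					       mlx5_mr_mem_event_cb, NULL);
}
#endif /* MLX5_MR_DOC_EXAMPLES */
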
/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}

/**
 * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	if (priv->config.mr_mempool_reg_en) {
		struct rte_mempool *mp = NULL;
		struct mlx5_mprq_buf *buf;

		if (!RTE_MBUF_HAS_EXTBUF(mb)) {
			mp = mlx5_mb2mp(mb);
		} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
			/* Recover MPRQ mempool. */
			buf = mb->shinfo->fcb_opaque;
			mp = buf->mp;
		}
		if (mp != NULL) {
			lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,
						     mr_ctrl, mp, addr);
			/*
			 * Lookup can only fail on invalid input, e.g. "addr"
			 * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
			 */
			if (lkey != UINT32_MAX)
				return lkey;
		}
		/* Fallback for generic mechanism in corner cases. */
	}
	lkey = mlx5_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
	}
	return lkey;
}

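/*
 * Illustrative sketch, not part of the driver build: the inline Tx datapath
 * helper in the headers checks the small per-queue linear cache first and
 * only drops into the bottom-half above on a miss. The guard macro and the
 * function name are hypothetical; field and helper names follow
 * mlx5_common_mr.h, but treat the exact shape as an approximation.
 */
#ifdef MLX5_MR_DOC_EXAMPLES
static __rte_always_inline uint32_t
mlx5_tx_mb2mr_example(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	uint32_t lkey;

	/* Flush the local cache if the device generation has changed. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on the per-queue MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, (uintptr_t)mb->buf_addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slow path: global cache lookup and, if needed, registration. */
	return mlx5_tx_mb2mr_bh(txq, mb);
}
#endif /* MLX5_MR_DOC_EXAMPLES */
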
/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * An externally allocated chunk is registered and an MR is created for the
 * chunk. The MR object is added to the global list. If the memseg list of an
 * MR object (mr->msl) is null, the MR object can be regarded as externally
 * allocated memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range has different physical memory mapped
 * again, it may cause a crash on the device due to a wrong translation entry.
 * The PMD can't track the free event of the external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx5_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mr_cache_entry entry;
	uint32_t lkey;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&sh->share_cache.rwlock);
	lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr);
	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
	if (lkey != UINT32_MAX)
		return;
	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
		dev->data->port_id, mem_idx, mp->name);
	mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
				sh->share_cache.reg_mr_cb);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to allocate a new MR of"
			" mempool (%s).",
			dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	/* Insert to the local cache table. */
	mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
			   mr_ctrl, addr, priv->config.mr_ext_memseg_en);
}

/**
 * Finds the first ethdev that matches the device.
 * Multiple ethdevs per PCI device only exist with representors.
 * In such a case, it is enough to get only one of the ports as they all
 * share the same ibv context.
 *
 * @param dev
 *   Pointer to the device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
dev_to_eth_dev(struct rte_device *dev)
{
	uint16_t port_id;

	port_id = rte_eth_find_next_of(0, dev);
	if (port_id == RTE_MAX_ETHPORTS)
		return NULL;
	return &rte_eth_devices[port_id];
}

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
		 uint64_t iova __rte_unused, size_t len)
{
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;

	dev = dev_to_eth_dev(rte_dev);
	if (dev == NULL) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to device %s", rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
				sh->share_cache.reg_mr_cb);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to dma map", dev->data->port_id);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}

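/*
 * Illustrative sketch, not part of the driver build: the application-side
 * sequence that ends up in the DMA map callback above. The external buffer is
 * first registered with the EAL and then mapped to the device. The guard
 * macro, the function name, the page size, and the use of the VA as IOVA are
 * assumptions made for the example only.
 */
#ifdef MLX5_MR_DOC_EXAMPLES
#include <rte_memory.h>
#include <rte_dev.h>
#include <rte_errno.h>

static int
mlx5_mr_example_dma_map(struct rte_device *rte_dev, void *ext_buf, size_t len)
{
	int ret;

	/* Tell the EAL about the externally allocated memory area. */
	ret = rte_extmem_register(ext_buf, len, NULL, 0, RTE_PGSIZE_4K);
	if (ret != 0 && rte_errno != EEXIST)
		return ret;
	/* Map it for DMA; for mlx5 this lands in mlx5_net_dma_map(). */
	return rte_dev_dma_map(rte_dev, ext_buf,
			       (uint64_t)(uintptr_t)ext_buf, len);
}
#endif /* MLX5_MR_DOC_EXAMPLES */
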
/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
		   uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_mr *mr;
	struct mr_cache_entry entry;

	dev = dev_to_eth_dev(rte_dev);
	if (dev == NULL) {
		DRV_LOG(WARNING, "unable to find matching ethdev to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
	if (mr == NULL) {
		rte_rwlock_write_unlock(&sh->share_cache.rwlock);
		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
		(void *)mr);
	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&sh->share_cache);
	/*
	 * No explicit wmb is needed after updating dev_gen due to
	 * store-release ordering in unlock that provides the
	 * implicit barrier at the software visible level.
	 */
	++sh->share_cache.dev_gen;
	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
		sh->share_cache.dev_gen);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}

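/*
 * Illustrative sketch, not part of the driver build: the application-side
 * teardown matching the map example earlier in this file, ending up in the
 * unmap callback above. The guard macro and the function name are
 * hypothetical.
 */
#ifdef MLX5_MR_DOC_EXAMPLES
static int
mlx5_mr_example_dma_unmap(struct rte_device *rte_dev, void *ext_buf,
			  size_t len)
{
	int ret;

	/* Unmap first; for mlx5 this lands in mlx5_net_dma_unmap(). */
	ret = rte_dev_dma_unmap(rte_dev, ext_buf,
				(uint64_t)(uintptr_t)ext_buf, len);
	if (ret != 0)
		return ret;
	/* Then drop the EAL registration of the external memory area. */
	return rte_extmem_unregister(ext_buf, len);
}
#endif /* MLX5_MR_DOC_EXAMPLES */
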
/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and fill in local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static uint32_t
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and search LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
		      struct rte_mempool *mp)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) from unregistered mempool"
			" having externally allocated memory"
			" in secondary process, please create mempool"
			" prior to rte_eth_dev_start()",
			PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx5_tx_addr2mr_bh(txq, addr);
}
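
/*
 * Illustrative sketch, not part of the driver build: the ordering the warning
 * above asks for. In a multi-process setup, mempools that use externally
 * allocated memory should exist before the port is started, so the primary
 * process can register them up front. The guard macro, the function name,
 * the pool name, and the pool sizing are placeholders.
 */
#ifdef MLX5_MR_DOC_EXAMPLES
#include <rte_mbuf.h>
#include <rte_ethdev.h>

static int
mlx5_mr_example_startup_order(uint16_t port_id)
{
	struct rte_mempool *mp;

	/* Create the packet pool first, in the primary process... */
	mp = rte_pktmbuf_pool_create("example_pool", 8192, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     SOCKET_ID_ANY);
	if (mp == NULL)
		return -rte_errno;
	/* ...and only then start the port. */
	return rte_eth_dev_start(port_id);
}
#endif /* MLX5_MR_DOC_EXAMPLES */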