/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -Wpedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>

#include <mlx5_glue.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
26 #include "mlx5_rxtx.h"
struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};
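
/*
 * Opaque context passed to the rte_mempool_mem_iter() callbacks below:
 * the device being updated, its per-queue MR control structure and the
 * accumulated return status of the walk.
 */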
struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx5_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Callback for memory free event. Iterate freed memsegs and check whether each
 * belongs to an existing MR. If found, clear the bit from the bitmap of the MR.
 * As a result, the MR would be fragmented. If it becomes empty, the MR will be
 * freed later by mlx5_mr_garbage_collect(). Even if this callback is called
 * from a secondary process, the garbage collector will be called in the primary
 * process, as the secondary process can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has to
 * be propagated to dataplane threads to flush the local caches.
 *
 * @param sh
 *   Pointer to the Ethernet device shared context.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
			  const void *addr, size_t len)
{
	const struct rte_memseg_list *msl;
	struct mlx5_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

69 DEBUG("device %s free callback: addr=%p, len=%zu",
70 sh->ibdev_name, addr, len);
71 msl = rte_mem_virt2memseg_list(addr);
72 /* addr and len must be page-aligned. */
73 MLX5_ASSERT((uintptr_t)addr ==
74 RTE_ALIGN((uintptr_t)addr, msl->page_sz));
75 MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
76 ms_n = len / msl->page_sz;
77 rte_rwlock_write_lock(&sh->share_cache.rwlock);
78 /* Clear bits of freed memsegs from MR. */
79 for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mr_cache_entry entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);
		if (mr == NULL)
			continue;
		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		MLX5_ASSERT(ms != NULL);
		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
		MLX5_ASSERT(pos < mr->ms_bmp_n);
		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
		      sh->ibdev_name, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
			DEBUG("device %s remove MR(%p) from list",
			      sh->ibdev_name, (void *)mr);
		}
		/*
		 * MR is fragmented or will be freed, the global cache must be
		 * rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mlx5_mr_rebuild_cache(&sh->share_cache);
		/*
		 * Flush local caches by propagating invalidation across cores.
		 * rte_smp_wmb() is enough to synchronize this event. If one of
		 * the freed memsegs is seen by another core, it means the
		 * memseg has been re-allocated by the allocator, which happens
		 * after this free call. Therefore, this store (incrementing the
		 * generation below) is guaranteed to be seen by the other core
		 * before it sees the newly allocated memory.
		 */
		++sh->share_cache.dev_gen;
		DEBUG("broadcasting local cache flush, gen=%d",
		      sh->share_cache.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
}

/**
 * Callback for memory event. This can be called from both primary and
 * secondary process.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(sh, dev_list, mem_event_cb)
			mlx5_mr_mem_event_free_cb(sh, addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_priv *priv = rxq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}
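
/*
 * Note: the Rx bottom-half above and the Tx bottom-half below are reached only
 * when the per-queue MR cache misses; they fall back to the shared, per-device
 * cache under its rwlock and are therefore kept off the fast path.
 */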

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}
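
/*
 * The bottom-half below distinguishes a plain lookup miss from ENXIO, which the
 * MR layer sets when the address is not covered by any memseg list, i.e. when
 * the mbuf comes from a mempool backed by externally allocated memory.
 */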

/**
 * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx5_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
	}
	return lkey;
}

/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * Externally allocated chunk is registered and a MR is created for the chunk.
 * The MR object is added to the global list. If memseg list of a MR object
 * (mr->msl) is null, the MR object can be regarded as externally allocated
 * memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range has different physical memory mapped
 * again, it may cause a crash on the device due to the wrong translation entry.
 * The PMD can't track the free event of the external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx5_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mr_cache_entry entry;
	uint32_t lkey;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&sh->share_cache.rwlock);
	lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr);
	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
	if (lkey != UINT32_MAX)
		return;
277 DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
278 dev->data->port_id, mem_idx, mp->name);
279 mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
280 sh->share_cache.reg_mr_cb);
283 "port %u unable to allocate a new MR of"
285 dev->data->port_id, mp->name);
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	/* Insert to the local cache table. */
	mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
			   mr_ctrl, addr, priv->config.mr_ext_memseg_en);
}
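
/*
 * The LKey returned by the local-cache fill above is not checked here: the MR
 * has just been inserted into the global cache, so the lookup is expected to
 * succeed, and a creation failure was already reported through data->ret.
 */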

/**
 * Finds the first ethdev that matches the PCI device.
 * Multiple ethdevs per PCI device only exist with representors.
 * In such a case, it is enough to get one of the ports as they all share
 * the same ibv context.
 *
 * @param pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
pci_dev_to_eth_dev(struct rte_pci_device *pdev)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV_OF(port_id, &pdev->device)
		return &rte_eth_devices[port_id];
	return NULL;
}

/**
 * DPDK callback to DMA map external memory to a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
	     uint64_t iova __rte_unused, size_t len)
{
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;

	dev = pci_dev_to_eth_dev(pdev);
	if (dev == NULL) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
				sh->share_cache.reg_mr_cb);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to dma map", dev->data->port_id);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}
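
/*
 * Note: mlx5_dma_map() only populates the shared (per-device) cache; per-queue
 * caches pick the new MR up lazily on their next bottom-half miss.
 */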

/**
 * DPDK callback to DMA unmap external memory from a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
	       uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_mr *mr;
	struct mr_cache_entry entry;

	dev = pci_dev_to_eth_dev(pdev);
	if (dev == NULL) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	rte_rwlock_read_lock(&sh->share_cache.rwlock);
	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
	if (mr == NULL) {
		rte_rwlock_read_unlock(&sh->share_cache.rwlock);
		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
				 "to PCI device %p", (uintptr_t)addr,
				 (void *)pdev);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
	DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
	      (void *)mr);
	mlx5_mr_rebuild_cache(&sh->share_cache);
	/*
	 * Flush local caches by propagating invalidation across cores.
	 * rte_smp_wmb() is enough to synchronize this event. If one of the
	 * freed memsegs is seen by another core, it means the memseg has been
	 * re-allocated by the allocator, which happens after this free call.
	 * Therefore, this store (incrementing the generation below) is
	 * guaranteed to be seen by the other core before it sees the newly
	 * allocated memory.
	 */
	++sh->share_cache.dev_gen;
	DEBUG("broadcasting local cache flush, gen=%d",
	      sh->share_cache.dev_gen);
	rte_smp_wmb();
	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
	return 0;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and fill in local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and search LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
		      struct rte_mempool *mp)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) from unregistered mempool"
			" having externally allocated memory"
			" in secondary process, please create mempool"
			" prior to rte_eth_dev_start()",
			PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx5_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, data->mr_ctrl,
				  (uintptr_t)memhdr->addr,
				  priv->config.mr_ext_memseg_en);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}