/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>

#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr; /* Address to search. */
	uintptr_t start; /* Start of the contiguous range found. */
	uintptr_t end; /* End of the contiguous range found. */
	const struct rte_memseg_list *msl; /* Memseg list the range belongs to. */
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev; /* Device registering the mempool. */
	struct mlx5_mr_ctrl *mr_ctrl; /* Per-queue MR control to fill. */
	int ret; /* Walk result, 0 on success, negative on failure. */
};

/**
 * Callback for memory free event. Iterate over freed memsegs and check
 * whether each one belongs to an existing MR. If found, clear the bit from
 * the bitmap of the MR. As a result, the MR would be fragmented. If it
 * becomes empty, the MR will be freed later by mlx5_mr_garbage_collect().
 * Even if this callback is called from a secondary process, the garbage
 * collector will be called in the primary process, as the secondary process
 * can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has
 * to be propagated to dataplane threads to flush the local caches.
 *
 * @param sh
 *   Pointer to the Ethernet device shared context.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
			  const void *addr, size_t len)
{
	const struct rte_memseg_list *msl;
	struct mlx5_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
		sh->ibdev_name, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	MLX5_ASSERT((uintptr_t)addr ==
		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mr_cache_entry entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);
		if (mr == NULL)
			continue;
		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		MLX5_ASSERT(ms != NULL);
		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		/* Check the bounds before touching the bitmap. */
		MLX5_ASSERT(pos < mr->ms_bmp_n);
		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
		DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
			sh->ibdev_name, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
			DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
				sh->ibdev_name, (void *)mr);
		}
		/*
		 * MR is fragmented or will be freed, the global cache must be
		 * rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mlx5_mr_rebuild_cache(&sh->share_cache);
		/*
		 * Flush local caches by propagating invalidation across cores.
		 * rte_smp_wmb() is enough to synchronize this event. If one of
		 * the freed memsegs is seen by another core, that means the
		 * memseg has been re-allocated by the allocator, which can only
		 * happen after this free call. Therefore, this store
		 * (incrementing the generation below) is guaranteed to be seen
		 * by the other core before that core sees the newly allocated
		 * memory.
		 */
		++sh->share_cache.dev_gen;
		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
			sh->share_cache.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
}
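
/*
 * Reader-side sketch (an editorial assumption, not part of the original
 * file): datapath threads are expected to compare their per-queue generation
 * against the device generation bumped above and drop the local cache on a
 * mismatch, roughly:
 *
 *	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *		mlx5_mr_flush_local_cache(mr_ctrl);
 *
 * mlx5_mr_flush_local_cache() comes from mlx5_common_mr; the check shown is
 * a simplified rendering of the fast-path lookup helpers.
 */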

/**
 * Callback for memory event. This can be called from both primary and
 * secondary process.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(sh, dev_list, mem_event_cb)
			mlx5_mr_mem_event_free_cb(sh, addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_priv *priv = rxq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}

/**
 * Bottom-half of LKey search on Tx. If it can't be found in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx5_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
	}
	return lkey;
}
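
/*
 * Fast-path sketch (hedged, simplified from the datapath headers): an inline
 * wrapper is expected to probe the per-queue linear cache first and fall
 * back to this bottom-half only on a miss, roughly:
 *
 *	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
 *				   MLX5_MR_CACHE_N, addr);
 *	if (likely(lkey != UINT32_MAX))
 *		return lkey;
 *	return mlx5_tx_mb2mr_bh(txq, mb);
 *
 * mlx5_mr_lookup_lkey() and MLX5_MR_CACHE_N come from mlx5_common_mr.h; the
 * snippet is illustrative only.
 */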

/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * An externally allocated chunk is registered and a MR is created for the
 * chunk. The MR object is added to the global list. If the memseg list of a
 * MR object (mr->msl) is null, the MR object can be regarded as externally
 * allocated memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range has different physical memory mapped
 * again, it may cause a crash on the device due to a stale translation
 * entry. The PMD can't track the free event of the external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx5_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mr_cache_entry entry;
	uint32_t lkey;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&sh->share_cache.rwlock);
	lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr);
	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
	if (lkey != UINT32_MAX)
		return;
	DRV_LOG(DEBUG, "port %u register MR for chunk #%u of mempool (%s)",
		dev->data->port_id, mem_idx, mp->name);
	mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
				sh->share_cache.reg_mr_cb);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to allocate a new MR of"
			" mempool (%s).",
			dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	/* Insert to the local cache table. */
	mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
			   mr_ctrl, addr, priv->config.mr_ext_memseg_en);
}
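
/*
 * Note (editorial assumption): because the PMD can't track frees of external
 * memory, an application must keep such a mempool's memory mapped at the
 * same virtual addresses for as long as any device may DMA into it;
 * re-populating the range with different physical pages silently invalidates
 * the device translation.
 */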

/**
 * Finds the first ethdev that matches the PCI device.
 * Multiple ethdevs per PCI device exist only with representors.
 * In such a case, it is enough to get only one of the ports as they all
 * share the same ibv context.
 *
 * @param pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
pci_dev_to_eth_dev(struct rte_pci_device *pdev)
{
	uint16_t port_id;

	port_id = rte_eth_find_next_of(0, &pdev->device);
	if (port_id == RTE_MAX_ETHPORTS)
		return NULL;
	return &rte_eth_devices[port_id];
}

/**
 * DPDK callback to DMA map external memory to a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
	     uint64_t iova __rte_unused, size_t len)
{
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;

	dev = pci_dev_to_eth_dev(pdev);
	if (dev == NULL) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
				sh->share_cache.reg_mr_cb);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to dma map", dev->data->port_id);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}
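
/*
 * Usage sketch (hedged): applications normally reach this callback through
 * the generic rte_dev_dma_map() API after registering the memory with EAL,
 * e.g.:
 *
 *	rte_extmem_register(va, len, NULL, 0, pgsz);
 *	rte_dev_dma_map(rte_dev, va, iova, len);
 *
 * The call chain is an assumption based on the generic DPDK external-memory
 * flow; the variable names above are illustrative.
 */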

/**
 * DPDK callback to DMA unmap external memory from a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
	       uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_mr *mr;
	struct mr_cache_entry entry;

	dev = pci_dev_to_eth_dev(pdev);
	if (dev == NULL) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	/* The MR list and global cache are modified below, take the write lock. */
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
	if (mr == NULL) {
		rte_rwlock_write_unlock(&sh->share_cache.rwlock);
		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
				 "to PCI device %p", (uintptr_t)addr,
				 (void *)pdev);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
		(void *)mr);
	mlx5_mr_rebuild_cache(&sh->share_cache);
	/*
	 * Flush local caches by propagating invalidation across cores.
	 * rte_smp_wmb() is enough to synchronize this event. If one of
	 * the freed memsegs is seen by another core, that means the
	 * memseg has been re-allocated by the allocator, which can only
	 * happen after this free call. Therefore, this store
	 * (incrementing the generation below) is guaranteed to be seen
	 * by the other core before that core sees the newly allocated
	 * memory.
	 */
	++sh->share_cache.dev_gen;
	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
		sh->share_cache.dev_gen);
	rte_smp_wmb();
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally
 * allocated memory and fill in local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally
 * allocated memory and search LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
		      struct rte_mempool *mp)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) from unregistered mempool"
			" having externally allocated memory"
			" in secondary process, please create mempool"
			" prior to rte_eth_dev_start()",
			PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx5_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, data->mr_ctrl,
				  (uintptr_t)memhdr->addr,
				  priv->config.mr_ext_memseg_en);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};
	uint32_t flags = rte_pktmbuf_priv_flags(mp);

	if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
		/*
		 * The pinned external buffer should be registered for DMA
		 * operations by the application. The mem_list of the pool
		 * contains the list of chunks with mbuf structures w/o
		 * built-in data buffers, and DMA does not actually happen
		 * there, so there is no need to create MRs for these chunks.
		 */
		return 0;
	}
	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
		       "having %u chunks.", dev->data->port_id,
		       mp->name, mp->nb_mem_chunks);
	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}
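
/*
 * Usage sketch (hedged, based on how the Rx path typically consumes this):
 * queue setup is expected to register the mempool of each Rx queue before
 * the first packet is received, roughly:
 *
 *	ret = mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
 *	if (ret < 0)
 *		return ret;	// fail queue start
 *
 * The surrounding control flow is illustrative; only mlx5_mr_update_mp()
 * above is defined in this file.
 */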