/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"

/**
 * Callback for memory event. This can be called from both primary and
 * secondary process.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(sh, dev_list, mem_event_cb)
			mlx5_free_mr_by_addr(&sh->share_cache,
					     sh->ibdev_name, addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}
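
/*
 * Illustrative sketch (not part of this file): how the callback above is
 * hooked into the EAL memory-event notifications, so that freed hugepage
 * ranges trigger MR invalidation. The registration name string and the
 * helper below are hypothetical; the real driver performs an equivalent
 * registration once, during shared-data initialization in the primary
 * process. Requires <rte_memory.h>.
 */
static int __rte_unused
example_register_mem_event_cb(void)
{
	/* EAL will invoke the callback on every hugepage alloc/free event. */
	return rte_mem_event_callback_register("EXAMPLE_MLX5_MEM_EVENT_CB",
					       mlx5_mr_mem_event_cb, NULL);
}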

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->sh->cdev->config.mr_ext_memseg_en);
}
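
/*
 * Conceptual sketch (not driver code) of the fast path that fronts the
 * bottom-half above: probe a small queue-local cache without any lock,
 * and only on a miss fall back to the bottom-half, which takes the shared
 * cache lock and may register new memory. The struct and helper below are
 * hypothetical simplifications; the real per-queue control
 * (struct mlx5_mr_ctrl) keeps a linear MRU array backed by a B-tree, but
 * the hit/miss control flow is the same.
 */
struct example_lkey_cache {
	uintptr_t start[8]; /* Inclusive start of a cached address chunk. */
	uintptr_t end[8];   /* Exclusive end of the chunk. */
	uint32_t lkey[8];   /* LKey covering [start, end). */
	unsigned int n;     /* Number of valid entries. */
};

static uint32_t __rte_unused
example_tx_addr2mr(struct mlx5_txq_data *txq,
		   struct example_lkey_cache *cache, uintptr_t addr)
{
	unsigned int i;

	for (i = 0; i < cache->n; ++i)
		if (addr >= cache->start[i] && addr < cache->end[i])
			return cache->lkey[i]; /* Hit: stays lockless. */
	/* Miss: take the slow path, which may register new memory. */
	return mlx5_tx_addr2mr_bh(txq, addr);
}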

/**
 * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	if (priv->sh->cdev->config.mr_mempool_reg_en) {
		struct rte_mempool *mp = NULL;
		struct mlx5_mprq_buf *buf;

		if (!RTE_MBUF_HAS_EXTBUF(mb)) {
			mp = mlx5_mb2mp(mb);
		} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
			/* Recover MPRQ mempool. */
			buf = mb->shinfo->fcb_opaque;
			mp = buf->mp;
		}
		if (mp != NULL) {
			lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,
						     mr_ctrl, mp, addr);
			/*
			 * Lookup can only fail on invalid input, e.g. "addr"
			 * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO
			 * set.
			 */
			if (lkey != UINT32_MAX)
				return lkey;
		}
		/* Fallback for generic mechanism in corner cases. */
	}
	return mlx5_tx_addr2mr_bh(txq, addr);
}
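
/*
 * Illustrative application-side sketch (hypothetical helpers, not driver
 * code): how an mbuf ends up in the RTE_MBUF_HAS_EXTBUF() branch handled
 * above. The application attaches externally allocated memory to an mbuf;
 * on Tx the driver can no longer derive the mempool from the buffer
 * address alone and must recover it as mlx5_tx_mb2mr_bh() does.
 * Requires <rte_mbuf.h>.
 */
static void
example_ext_buf_free_cb(void *addr __rte_unused, void *opaque __rte_unused)
{
	/* The application owns the memory; nothing to free in this sketch. */
}

static void __rte_unused
example_attach_external_buf(struct rte_mbuf *m, void *ext_addr,
			    rte_iova_t ext_iova, uint16_t ext_len)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	/* Carve the shared-info footer out of the external buffer itself. */
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_addr, &ext_len,
						    example_ext_buf_free_cb,
						    NULL);
	if (shinfo == NULL)
		return;
	/* From here on, RTE_MBUF_HAS_EXTBUF(m) is true. */
	rte_pktmbuf_attach_extbuf(m, ext_addr, ext_iova, ext_len, shinfo);
}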

/**
 * Finds the first ethdev that matches the device.
 * Multiple ethdevs per PCI device exist only with representors.
 * In such a case, it is enough to get only one of the ports, as they all
 * share the same ibv context.
 *
 * @param dev
 *   Pointer to the device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
dev_to_eth_dev(struct rte_device *dev)
{
	uint16_t port_id;

	port_id = rte_eth_find_next_of(0, dev);
	if (port_id == RTE_MAX_ETHPORTS)
		return NULL;
	return &rte_eth_devices[port_id];
}
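
/*
 * For contrast with dev_to_eth_dev() above, which deliberately stops at
 * the first match: a sketch (hypothetical helper) walking every ethdev
 * spawned from one rte_device, e.g. all representor ports, using the
 * standard ethdev iterator from <rte_ethdev.h>.
 */
static void __rte_unused
example_list_ports_of(const struct rte_device *dev)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV_OF(port_id, dev)
		DRV_LOG(DEBUG, "port %u belongs to device %s",
			port_id, dev->name);
}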

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
		 uint64_t iova __rte_unused, size_t len)
{
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;

	dev = dev_to_eth_dev(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to device %s", rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,
				SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING,
			"port %u unable to dma map", dev->data->port_id);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}
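
/*
 * Illustrative application-side sketch (hypothetical helper, not driver
 * code): the sequence that ends up invoking mlx5_net_dma_map() above. The
 * application first registers the external memory with EAL, then asks the
 * device layer to DMA-map it. Assumes `len` is a multiple of `pg_sz` and
 * that IOVA contiguity is not required (iova_addrs == NULL); the VA is
 * reused as the IOVA, which is fine here since this driver ignores it.
 * Requires <rte_memory.h> and <rte_dev.h>.
 */
static int __rte_unused
example_map_extmem(struct rte_device *dev, void *addr, size_t len,
		   size_t pg_sz)
{
	int ret;

	ret = rte_extmem_register(addr, len, NULL, 0, pg_sz);
	if (ret != 0)
		return ret;
	/* Routed by the bus/ethdev layer to the driver's dma_map callback. */
	return rte_dev_dma_map(dev, addr, (uint64_t)(uintptr_t)addr, len);
}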

/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
		   uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_mr *mr;
	struct mr_cache_entry entry;

	dev = dev_to_eth_dev(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING, "unable to find matching ethdev to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_write_unlock(&sh->share_cache.rwlock);
		DRV_LOG(WARNING,
			"address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
		(void *)mr);
	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&sh->share_cache);
	/*
	 * No explicit wmb is needed after updating dev_gen: the
	 * store-release ordering of the unlock provides the implicit
	 * barrier at the software-visible level.
	 */
	++sh->share_cache.dev_gen;
	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
		sh->share_cache.dev_gen);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}
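
/*
 * Matching application-side sketch for the unmap path: undo the mapping
 * made in the hypothetical example_map_extmem() above, then drop the EAL
 * registration. Same assumptions apply; the unmap is routed to the
 * driver's dma_unmap callback, i.e. mlx5_net_dma_unmap() here.
 */
static int __rte_unused
example_unmap_extmem(struct rte_device *dev, void *addr, size_t len)
{
	int ret;

	ret = rte_dev_dma_unmap(dev, addr, (uint64_t)(uintptr_t)addr, len);
	if (ret != 0)
		return ret;
	return rte_extmem_unregister(addr, len);
}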