/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
+#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
+#include <rte_rwlock.h>
+#include <rte_bus_pci.h>
+
+#include <mlx5_common_mp.h>
+#include <mlx5_common_mr.h>
#include "mlx5.h"
+#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
-#include "mlx5_glue.h"
-struct mlx5_check_mempool_data {
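+/* Context for looking up the contiguous memseg range covering an address. */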
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
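+/* Context shared by the rte_mempool_mem_iter() callbacks updating MR caches. */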
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx5_mr_ctrl *mr_ctrl;
int ret;
- char *start;
- char *end;
};
-/* Called by mlx5_check_mempool() when iterating the memory chunks. */
+/**
+ * Callback for memory free event. Iterate freed memsegs and check whether each
+ * one belongs to an existing MR. If so, clear the corresponding bit in the
+ * MR's bitmap; as a result the MR becomes fragmented. If the MR becomes empty,
+ * it will be freed later by mlx5_mr_garbage_collect(). Even if this callback
+ * is invoked on behalf of a secondary process, garbage collection happens in
+ * the primary process, as a secondary process can't call mlx5_mr_create().
+ *
+ * If anything changes, the global cache must be rebuilt and the event
+ * propagated to the dataplane threads so they flush their local caches.
+ *
+ * @param sh
+ * Pointer to the Ethernet device shared context.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
static void
-mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
- void *opaque, struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx __rte_unused)
+mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
+ const void *addr, size_t len)
{
- struct mlx5_check_mempool_data *data = opaque;
+ const struct rte_memseg_list *msl;
+ struct mlx5_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
- return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
- return;
+ DEBUG("device %s free callback: addr=%p, len=%zu",
+ sh->ibdev_name, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ MLX5_ASSERT((uintptr_t)addr ==
+ RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
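+ /* Number of memsegs covered by the freed region. */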
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&sh->share_cache.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mr_cache_entry entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);
+ if (mr == NULL)
+ continue;
+ MLX5_ASSERT(mr->msl); /* Can't be external memory. */
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ MLX5_ASSERT(ms != NULL);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
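+ /* The memseg index relative to the MR's base gives the bit position. */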
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+ MLX5_ASSERT(pos < mr->ms_bmp_n);
+ DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+ sh->ibdev_name, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
+ DEBUG("device %s remove MR(%p) from list",
+ sh->ibdev_name, (void *)mr);
+ }
+ /*
+ * The MR is fragmented or will be freed. The global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
}
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
+ if (rebuild) {
+ mlx5_mr_rebuild_cache(&sh->share_cache);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * the freed memsegs is seen by another core, the memseg must
+ * have been re-allocated by the allocator, which can only
+ * happen after this free call. The store below (incrementing
+ * the generation) is therefore guaranteed to be visible to
+ * that core before it sees the newly allocated memory.
+ */
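+ /*
+ * Reader side (a sketch): each queue's mlx5_mr_ctrl keeps a
+ * snapshot of the generation in cur_gen and compares it with
+ * *dev_gen_ptr on lookup, flushing its local cache when the
+ * two differ.
+ */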
+ ++sh->share_cache.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ sh->share_cache.dev_gen);
+ rte_smp_wmb();
}
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
- return;
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+}
+
+/**
+ * Callback for memory event. Registered only in the primary process, so it is
+ * invoked in the primary process only (see the assertion below).
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ * @param arg
+ * Unused callback argument.
+ */
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct mlx5_dev_ctx_shared *sh;
+ struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+
+ /* Must be called from the primary process. */
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ /* Iterate all the existing mlx5 devices. */
+ LIST_FOREACH(sh, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(sh, addr, len);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
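+ /* New memory is registered lazily, on the first LKey miss. */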
+ default:
+ break;
}
- /* Error, mempool is not virtually contiguous. */
- data->ret = -1;
}
/**
- * Check if a mempool can be used: it must be virtually contiguous.
+ * Bottom-half of LKey search on Rx.
*
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
*
* @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
+ * Searched LKey on success, UINT32_MAX on no match.
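+ *
+ * A sketch of the expected fast path (assuming the mlx5_mr_lookup_lkey()
+ * helper from mlx5_common_mr.h): the per-queue cache is tried first and
+ * this bottom-half runs only on a miss:
+ *
+ * lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
+ * MLX5_MR_CACHE_N, addr);
+ * if (likely(lkey != UINT32_MAX))
+ * return lkey;
+ * return mlx5_rx_addr2mr_bh(rxq, addr);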
*/
-static int
-mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
- uintptr_t *end)
+uint32_t
+mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
- struct mlx5_check_mempool_data data;
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct mlx5_priv *priv = rxq_ctrl->priv;
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
- return data.ret;
+ return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+ &priv->sh->share_cache, mr_ctrl, addr,
+ priv->config.mr_ext_memseg_en);
}
/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ * Bottom-half of LKey search on Tx.
*
* @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
*
* @return
- * mr on success, NULL on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
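+ *
+ * The fast path mirrors the Rx case above: the per-queue cache is tried
+ * first and this bottom-half runs only on a miss.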
*/
-struct mlx5_mr *
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
- unsigned int idx)
+static uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
- struct rte_eth_dev *dev;
- struct mlx5_mr *mr;
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct mlx5_priv *priv = txq_ctrl->priv;
- rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq_ctrl, mp->name, (void *)mp);
- dev = txq_ctrl->priv->dev;
- mr = mlx5_mr_get(dev, mp);
- if (mr == NULL) {
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DEBUG("Using unregistered mempool 0x%p(%s) in "
- "secondary process, please create mempool before "
- " rte_eth_dev_start()",
- (void *)mp, mp->name);
- rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- return NULL;
- }
- mr = mlx5_mr_new(dev, mp);
- }
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
- (void *)txq_ctrl);
- rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- return NULL;
- }
- if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq_ctrl);
- --idx;
- mlx5_mr_release(txq->mp2mr[0]);
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
- }
- /* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx] = mr;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq_ctrl, mp->name, (void *)mp,
- txq_ctrl->txq.mp2mr[idx]->lkey);
- rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- return mr;
+ return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+ &priv->sh->share_cache, mr_ctrl, addr,
+ priv->config.mr_ext_memseg_en);
}
-struct mlx5_mp2mr_mbuf_check_data {
- int ret;
-};
-
/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
+ * Bottom-half of LKey search on Tx. If the address can't be found in the
+ * memseg list, register the mbuf's mempool as externally allocated memory.
*
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mb
+ * Pointer to mbuf.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index __rte_unused)
+uint32_t
+mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
- struct mlx5_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey;
- /*
- * Check whether mbuf structure fits element size and whether mempool
- * pointer is valid.
- */
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
+ lkey = mlx5_tx_addr2mr_bh(txq, addr);
+ if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+ }
+ return lkey;
}
/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
+ *
+ * The externally allocated chunk is registered and an MR is created for it.
+ * The MR object is added to the global list. If the memseg list of an MR
+ * object (mr->msl) is NULL, the MR object can be regarded as covering
+ * externally allocated memory.
*
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to TX queue structure.
+ * Once external memory is registered, it should be static. If the memory is
+ * freed and the virtual address range is later mapped to different physical
+ * memory, the stale translation entry may crash the device. The PMD can't
+ * track free events of external memory for now.
*/
-void
-mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
+static void
+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
{
- struct priv *priv = (struct priv *)arg;
- struct mlx5_mp2mr_mbuf_check_data data = {
- .ret = 0,
- };
- struct mlx5_mr *mr;
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
+ struct mlx5_mr *mr = NULL;
+ uintptr_t addr = (uintptr_t)memhdr->addr;
+ size_t len = memhdr->len;
+ struct mr_cache_entry entry;
+ uint32_t lkey;
- /* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /* If the address is already registered, there is nothing to do. */
+ rte_rwlock_read_lock(&sh->share_cache.rwlock);
+ lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr);
+ rte_rwlock_read_unlock(&sh->share_cache.rwlock);
+ if (lkey != UINT32_MAX)
return;
- mr = mlx5_mr_get(priv->dev, mp);
- if (mr) {
- mlx5_mr_release(mr);
+ DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
+ dev->data->port_id, mem_idx, mp->name);
+ mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+ sh->share_cache.reg_mr_cb);
+ if (!mr) {
+ DRV_LOG(WARNING,
+ "port %u unable to allocate a new MR for"
+ " mempool (%s).",
+ dev->data->port_id, mp->name);
+ data->ret = -1;
return;
}
- mlx5_mr_new(priv->dev, mp);
+ rte_rwlock_write_lock(&sh->share_cache.rwlock);
+ LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
+ /* Insert into the global cache table. */
+ mlx5_mr_insert_cache(&sh->share_cache, mr);
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ /* Insert into the local cache table. */
+ mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
+ mr_ctrl, addr, priv->config.mr_ext_memseg_en);
}
/**
- * Register a new memory region from the mempool and store it in the memory
- * region list.
+ * Find the first ethdev that matches the PCI device.
+ * Multiple ethdevs per PCI device only exist with representors.
+ * In that case it is enough to get one of the ports, as they all share
+ * the same ibv context.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param mp
- * Pointer to the memory pool to register.
+ * @param pdev
+ * Pointer to the PCI device.
*
* @return
- * The memory region on success.
+ * Pointer to the ethdev if found, NULL otherwise.
*/
-struct mlx5_mr *
-mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
+static struct rte_eth_dev *
+pci_dev_to_eth_dev(struct rte_pci_device *pdev)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
+ uint16_t port_id;
+
+ port_id = rte_eth_find_next_of(0, &pdev->device);
+ if (port_id == RTE_MAX_ETHPORTS)
+ return NULL;
+ return &rte_eth_devices[port_id];
+}
+
+/**
+ * DPDK callback to DMA map external memory to a PCI device.
+ *
+ * @param pdev
+ * Pointer to the PCI device.
+ * @param addr
+ * Starting virtual address of memory to be mapped.
+ * @param iova
+ * Starting IOVA address of memory to be mapped.
+ * @param len
+ * Length of memory segment being mapped.
+ *
+ * @return
+ * 0 on success, negative value on error.
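+ *
+ * This is wired up as the .dma_map operation of the driver's rte_pci_driver
+ * (see the mlx5_driver definition in mlx5.c).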
+ */
+int
+mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
+ uint64_t iova __rte_unused, size_t len)
+{
+ struct rte_eth_dev *dev;
struct mlx5_mr *mr;
+ struct mlx5_priv *priv;
+ struct mlx5_dev_ctx_shared *sh;
- mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
+ dev = pci_dev_to_eth_dev(pdev);
+ if (!dev) {
+ DRV_LOG(WARNING, "unable to find matching ethdev "
+ "to PCI device %p", (void *)pdev);
+ rte_errno = ENODEV;
+ return -1;
+ }
+ priv = dev->data->dev_private;
+ sh = priv->sh;
+ mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
+ sh->share_cache.reg_mr_cb);
if (!mr) {
- DEBUG("unable to configure MR, ibv_reg_mr() failed.");
- return NULL;
+ DRV_LOG(WARNING,
+ "port %u unable to dma map", dev->data->port_id);
+ rte_errno = EINVAL;
+ return -1;
}
- if (mlx5_check_mempool(mp, &start, &end) != 0) {
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
+ rte_rwlock_write_lock(&sh->share_cache.rwlock);
+ LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
+ /* Insert into the global cache table. */
+ mlx5_mr_insert_cache(&sh->share_cache, mr);
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ return 0;
+}
+
+/**
+ * DPDK callback to DMA unmap external memory from a PCI device.
+ *
+ * @param pdev
+ * Pointer to the PCI device.
+ * @param addr
+ * Starting virtual address of memory to be unmapped.
+ * @param iova
+ * Starting IOVA address of memory to be unmapped.
+ * @param len
+ * Length of memory segment being unmapped.
+ *
+ * @return
+ * 0 on success, negative value on error.
+ */
+int
+mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
+ uint64_t iova __rte_unused, size_t len __rte_unused)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ struct mlx5_dev_ctx_shared *sh;
+ struct mlx5_mr *mr;
+ struct mr_cache_entry entry;
+
+ dev = pci_dev_to_eth_dev(pdev);
+ if (!dev) {
+ DRV_LOG(WARNING, "unable to find matching ethdev "
+ "to PCI device %p", (void *)pdev);
+ rte_errno = ENODEV;
+ return -1;
}
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Save original addresses for exact MR lookup. */
- mr->start = start;
- mr->end = end;
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
+ priv = dev->data->dev_private;
+ sh = priv->sh;
+ /* Take the write lock: the MR list and global cache are modified below. */
+ rte_rwlock_write_lock(&sh->share_cache.rwlock);
+ mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
+ if (!mr) {
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
+ "to PCI device %p", (uintptr_t)addr,
+ (void *)pdev);
+ rte_errno = EINVAL;
+ return -1;
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE);
- mr->mp = mp;
- mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
- rte_atomic32_inc(&mr->refcnt);
- DEBUG("%p: new Memory Region %p refcnt: %d", (void *)dev,
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- LIST_INSERT_HEAD(&priv->mr, mr, next);
- return mr;
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
+ (void *)mr);
+ mlx5_mr_rebuild_cache(&sh->share_cache);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * freed memsegs is seen by other core, that means the memseg
+ * has been allocated by allocator, which will come after this
+ * free call. Therefore, this store instruction (incrementing
+ * generation below) will be guaranteed to be seen by other core
+ * before the core sees the newly allocated memory.
+ */
+ ++sh->share_cache.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ sh->share_cache.dev_gen);
+ rte_smp_wmb();
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ return 0;
}
/**
- * Search the memory region object in the memory region list.
+ * Register MRs for all memory chunks of a mempool with externally allocated
+ * memory and fill the local cache.
*
* @param dev
* Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
* @param mp
- * Pointer to the memory pool to register.
+ * Pointer to registering Mempool.
*
* @return
- * The memory region on success.
+ * 0 on success, -1 on failure.
*/
-struct mlx5_mr *
-mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
+static int
+mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr;
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
- assert(mp);
- if (LIST_EMPTY(&priv->mr))
- return NULL;
- LIST_FOREACH(mr, &priv->mr, next) {
- if (mr->mp == mp) {
- rte_atomic32_inc(&mr->refcnt);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- return mr;
- }
- }
- return NULL;
+ rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
+ return data.ret;
}
/**
- * Release the memory region object.
+ * Register MRs for all memory chunks of a mempool with externally allocated
+ * memory and return the LKey found for the given address.
*
- * @param mr
- * Pointer to memory region to release.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ * @param mp
+ * Pointer to registering Mempool where addr belongs.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * LKey for address on success, UINT32_MAX on failure.
*/
-int
-mlx5_mr_release(struct mlx5_mr *mr)
+uint32_t
+mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp)
{
- assert(mr);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- if (rte_atomic32_dec_and_test(&mr->refcnt)) {
- claim_zero(mlx5_glue->dereg_mr(mr->mr));
- LIST_REMOVE(mr, next);
- rte_free(mr);
- return 0;
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct mlx5_priv *priv = txq_ctrl->priv;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DRV_LOG(WARNING,
+ "port %u using address (%p) from unregistered mempool"
+ " having externally allocated memory"
+ " in secondary process, please create mempool"
+ " prior to rte_eth_dev_start()",
+ PORT_ID(priv), (void *)addr);
+ return UINT32_MAX;
}
- return 1;
+ mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
+ return mlx5_tx_addr2mr_bh(txq, addr);
+}
+
+/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
+static void
+mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ uint32_t lkey;
+
+ /* Stop iterating if a previous chunk already failed. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+ &priv->sh->share_cache, data->mr_ctrl,
+ (uintptr_t)memhdr->addr,
+ priv->config.mr_ext_memseg_en);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
}
/**
- * Verify the flow list is empty
+ * Register entire memory chunks in a Mempool.
*
* @param dev
* Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
*
* @return
- * The number of object not released.
+ * 0 on success, -1 on failure.
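+ *
+ * Typically called when an Rx queue is started, to pre-populate the queue's
+ * MR cache with the chunks of its mempool.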
*/
int
-mlx5_mr_verify(struct rte_eth_dev *dev)
+mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
- int ret = 0;
- struct mlx5_mr *mr;
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
- LIST_FOREACH(mr, &priv->mr, next) {
- DEBUG("%p: mr %p still referenced", (void *)dev,
- (void *)mr);
- ++ret;
+ rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+ if (data.ret < 0 && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
}
- return ret;
+ return data.ret;
}