* Copyright 2016 Mellanox Technologies, Ltd
*/
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>
-#include <mlx5_glue.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
struct mr_find_contig_memsegs_data {
uintptr_t addr;
int ret;
};
-/**
- * Callback for memory free event. Iterate freed memsegs and check whether it
- * belongs to an existing MR. If found, clear the bit from bitmap of MR. As a
- * result, the MR would be fragmented. If it becomes empty, the MR will be freed
- * later by mlx5_mr_garbage_collect(). Even if this callback is called from a
- * secondary process, the garbage collector will be called in primary process
- * as the secondary process can't call mlx5_mr_create().
- *
- * The global cache must be rebuilt if there's any change and this event has to
- * be propagated to dataplane threads to flush the local caches.
- *
- * @param sh
- * Pointer to the Ethernet device shared context.
- * @param addr
- * Address of freed memory.
- * @param len
- * Size of freed memory.
- */
-static void
-mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
- const void *addr, size_t len)
-{
- const struct rte_memseg_list *msl;
- struct mlx5_mr *mr;
- int ms_n;
- int i;
- int rebuild = 0;
-
- DEBUG("device %s free callback: addr=%p, len=%zu",
- sh->ibdev_name, addr, len);
- msl = rte_mem_virt2memseg_list(addr);
- /* addr and len must be page-aligned. */
- MLX5_ASSERT((uintptr_t)addr ==
- RTE_ALIGN((uintptr_t)addr, msl->page_sz));
- MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
- ms_n = len / msl->page_sz;
- rte_rwlock_write_lock(&sh->share_cache.rwlock);
- /* Clear bits of freed memsegs from MR. */
- for (i = 0; i < ms_n; ++i) {
- const struct rte_memseg *ms;
- struct mr_cache_entry entry;
- uintptr_t start;
- int ms_idx;
- uint32_t pos;
-
- /* Find MR having this memseg. */
- start = (uintptr_t)addr + i * msl->page_sz;
- mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);
- if (mr == NULL)
- continue;
- MLX5_ASSERT(mr->msl); /* Can't be external memory. */
- ms = rte_mem_virt2memseg((void *)start, msl);
- MLX5_ASSERT(ms != NULL);
- MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
- ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
- pos = ms_idx - mr->ms_base_idx;
- MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
- MLX5_ASSERT(pos < mr->ms_bmp_n);
- DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
- sh->ibdev_name, (void *)mr, pos, (void *)start);
- rte_bitmap_clear(mr->ms_bmp, pos);
- if (--mr->ms_n == 0) {
- LIST_REMOVE(mr, mr);
- LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
- DEBUG("device %s remove MR(%p) from list",
- sh->ibdev_name, (void *)mr);
- }
- /*
- * MR is fragmented or will be freed. the global cache must be
- * rebuilt.
- */
- rebuild = 1;
- }
- if (rebuild) {
- mlx5_mr_rebuild_cache(&sh->share_cache);
- /*
- * Flush local caches by propagating invalidation across cores.
- * rte_smp_wmb() is enough to synchronize this event. If one of
- * freed memsegs is seen by other core, that means the memseg
- * has been allocated by allocator, which will come after this
- * free call. Therefore, this store instruction (incrementing
- * generation below) will be guaranteed to be seen by other core
- * before the core sees the newly allocated memory.
- */
- ++sh->share_cache.dev_gen;
- DEBUG("broadcasting local cache flush, gen=%d",
- sh->share_cache.dev_gen);
- rte_smp_wmb();
- }
- rte_rwlock_write_unlock(&sh->share_cache.rwlock);
-}
-
/**
 * Callback for memory event. This can be called from both primary and secondary
 * processes.
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
/* Iterate all the existing mlx5 devices. */
LIST_FOREACH(sh, dev_list, mem_event_cb)
- mlx5_mr_mem_event_free_cb(sh, addr, len);
+ mlx5_free_mr_by_addr(&sh->share_cache,
+ sh->ibdev_name, addr, len);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
return;
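The handler above now delegates to the shared mlx5_free_mr_by_addr() helper instead of the driver-local free callback removed by this patch. For context, such handlers are attached through the EAL memory-event API; below is a minimal registration sketch using rte_mem_event_callback_register() from <rte_memory.h> (the callback body and registration name are hypothetical, the EAL calls are real):

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_memory.h>

    /* Invoked by the EAL for every hugepage alloc/free event; the mlx5
     * handler reacts to RTE_MEM_EVENT_FREE as shown above. */
    static void
    demo_mem_event_cb(enum rte_mem_event event_type, const void *addr,
                      size_t len, void *arg __rte_unused)
    {
            if (event_type == RTE_MEM_EVENT_FREE)
                    printf("freed: addr=%p len=%zu\n", addr, len);
    }

    static int
    demo_register(void)
    {
            /* "DEMO_MEM_EVENT_CB" is an arbitrary name for this sketch. */
            return rte_mem_event_callback_register("DEMO_MEM_EVENT_CB",
                                                   demo_mem_event_cb, NULL);
    }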
DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
dev->data->port_id, mem_idx, mp->name);
- mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id);
+ mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+ sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to allocate a new MR of"
{
uint16_t port_id;
- RTE_ETH_FOREACH_DEV_OF(port_id, &pdev->device)
- return &rte_eth_devices[port_id];
- return NULL;
+ port_id = rte_eth_find_next_of(0, &pdev->device);
+ if (port_id == RTE_MAX_ETHPORTS)
+ return NULL;
+ return &rte_eth_devices[port_id];
}
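The old body used RTE_ETH_FOREACH_DEV_OF() with a bare return as the loop body, which obscured that only the first matching port is wanted. rte_eth_find_next_of() states that directly and returns RTE_MAX_ETHPORTS when the device owns no port. For reference, the macro expands to roughly this loop (sketch based on the ethdev definition):

    uint16_t pid;

    for (pid = rte_eth_find_next_of(0, &pdev->device);
         pid < RTE_MAX_ETHPORTS;
         pid = rte_eth_find_next_of(pid + 1, &pdev->device)) {
            /* Visits every port backed by pdev; the function above only
             * needs the first hit, hence the single call in the new code. */
    }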
/**
}
priv = dev->data->dev_private;
sh = priv->sh;
- mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY);
+ mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
+ sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to dma map", dev->data->port_id);
}
priv = dev->data->dev_private;
sh = priv->sh;
- rte_rwlock_read_lock(&sh->share_cache.rwlock);
+ rte_rwlock_write_lock(&sh->share_cache.rwlock);
mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
if (!mr) {
- rte_rwlock_read_unlock(&sh->share_cache.rwlock);
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
"to PCI device %p", (uintptr_t)addr,
(void *)pdev);
return -1;
}
LIST_REMOVE(mr, mr);
- LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
- DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
+ mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
+ DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
(void *)mr);
mlx5_mr_rebuild_cache(&sh->share_cache);
/*
- * Flush local caches by propagating invalidation across cores.
- * rte_smp_wmb() is enough to synchronize this event. If one of
- * freed memsegs is seen by other core, that means the memseg
- * has been allocated by allocator, which will come after this
- * free call. Therefore, this store instruction (incrementing
- * generation below) will be guaranteed to be seen by other core
- * before the core sees the newly allocated memory.
+ * No explicit wmb is needed after updating dev_gen because the
+ * store-release ordering of the rwlock unlock provides the
+ * implicit barrier at the software visible level.
*/
++sh->share_cache.dev_gen;
- DEBUG("broadcasting local cache flush, gen=%d",
+ DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
sh->share_cache.dev_gen);
- rte_smp_wmb();
- rte_rwlock_read_unlock(&sh->share_cache.rwlock);
+ rte_rwlock_write_unlock(&sh->share_cache.rwlock);
return 0;
}
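The lock change pairs with dropping rte_smp_wmb(): dev_gen is now bumped under the write lock, and the unlock's release semantics guarantee the store is visible before any reader can re-acquire the lock, i.e. before the freed memory can be observed as reallocated. On the datapath side the generation is checked roughly as below (a sketch; the flush helper and field names follow mlx5_common_mr.h of this era and may differ across releases):

    /* Per-queue check before an lkey lookup: if the shared generation
     * moved, the local MR cache is stale and must be flushed first. */
    static inline void
    demo_check_gen(struct mlx5_mr_ctrl *mr_ctrl)
    {
            if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
                    mlx5_mr_flush_local_cache(mr_ctrl);
    }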
.mr_ctrl = mr_ctrl,
.ret = 0,
};
+ uint32_t flags = rte_pktmbuf_priv_flags(mp);
+
+ if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
+ /*
+ * The pinned external buffers should be registered for DMA
+ * operations by the application. The mem_list of such a pool
+ * contains only the chunks holding mbuf structures without
+ * built-in data buffers; no DMA happens there, so there is no
+ * need to create MRs for these chunks.
+ */
+ return 0;
+ }
+ DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+ "having %u chunks.", dev->data->port_id,
+ mp->name, mp->nb_mem_chunks);
rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
if (data.ret < 0 && rte_errno == ENXIO) {
/* Mempool may have externally allocated memory. */
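For context, a pool that takes the early return above is created with pinned external buffers: the mbuf headers live in pool chunks while the data buffers sit in application-owned, DMA-mapped memory. A minimal creation sketch, assuming rte_pktmbuf_pool_create_extbuf() and struct rte_pktmbuf_extmem as in upstream DPDK (the sizes and pool name are arbitrary):

    #include <rte_mbuf.h>

    static struct rte_mempool *
    demo_pinned_pool(void *buf_va, size_t buf_len)
    {
            struct rte_pktmbuf_extmem ext_mem = {
                    .buf_ptr = buf_va, /* application-provided area */
                    .buf_iova = (rte_iova_t)(uintptr_t)buf_va, /* VA mode */
                    .buf_len = buf_len,
                    .elt_size = 2048,
            };

            /* Mbuf structures come from pool chunks; data buffers stay
             * pinned in ext_mem, which is why the MR-update path above
             * skips registering the chunks of such pools. */
            return rte_pktmbuf_pool_create_extbuf("demo_pinned", 4096, 256,
                                                  0, 2048, SOCKET_ID_ANY,
                                                  &ext_mem, 1);
    }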