#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>
+#include <mlx5_glue.h>
+
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
-#include "mlx5_glue.h"
struct mr_find_contig_memsegs_data {
uintptr_t addr;
uint16_t n;
uint16_t base = 0;
- assert(bt != NULL);
+ MLX5_ASSERT(bt != NULL);
lkp_tbl = *bt->table;
n = bt->len;
/* First entry must be NULL for comparison. */
- assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
- lkp_tbl[0].lkey == UINT32_MAX));
+ MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
/* Binary search. */
do {
register uint16_t delta = n >> 1;
n -= delta;
}
} while (n > 1);
- assert(addr >= lkp_tbl[base].start);
+ MLX5_ASSERT(addr >= lkp_tbl[base].start);
*idx = base;
if (addr < lkp_tbl[base].end)
return lkp_tbl[base].lkey;
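/*
 * Illustrative sketch (not part of this change): callers probe a B-tree
 * cache first and treat UINT32_MAX as a miss, falling back to the slower
 * bottom-half lookup used elsewhere in this file, e.g.
 *
 *   uint16_t idx;
 *   uint32_t lkey = mr_btree_lookup(bt, &idx, (uintptr_t)addr);
 *   if (lkey == UINT32_MAX)
 *       lkey = mlx5_mr_addr2mr_bh(dev, mr_ctrl, (uintptr_t)addr);
 */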
uint16_t idx = 0;
size_t shift;
- assert(bt != NULL);
- assert(bt->len <= bt->size);
- assert(bt->len > 0);
+ MLX5_ASSERT(bt != NULL);
+ MLX5_ASSERT(bt->len <= bt->size);
+ MLX5_ASSERT(bt->len > 0);
lkp_tbl = *bt->table;
/* Find out the slot for insertion. */
if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
rte_errno = EINVAL;
return -rte_errno;
}
- assert(!bt->table && !bt->size);
+ MLX5_ASSERT(!bt->table && !bt->size);
memset(bt, 0, sizeof(*bt));
bt->table = rte_calloc_socket("B-tree table",
n, sizeof(struct mlx5_mr_cache),
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX5_DEBUG
int idx;
struct mlx5_mr_cache *lkp_tbl;
if (mr->msl == NULL) {
struct ibv_mr *ibv_mr = mr->ibv_mr;
- assert(mr->ms_bmp_n == 1);
- assert(mr->ms_n == 1);
- assert(base_idx == 0);
+ MLX5_ASSERT(mr->ms_bmp_n == 1);
+ MLX5_ASSERT(mr->ms_n == 1);
+ MLX5_ASSERT(base_idx == 0);
/*
* Can't search it from memseg list but get it directly from
* verbs MR as there's only one chunk.
msl = mr->msl;
ms = rte_fbarray_get(&msl->memseg_arr,
mr->ms_base_idx + idx);
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
if (!start)
start = ms->addr_64;
end = ms->addr_64 + ms->hugepage_sz;
* mlx5_mr_create() on miss.
*
- * @param dev
- *   Pointer to Ethernet device.
+ * @param sh
+ *   Pointer to Ethernet device shared context.
* @param mr
* Pointer to MR to insert.
*
* 0 on success, -1 on failure.
*/
static int
-mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
+mr_insert_dev_cache(struct mlx5_ibv_shared *sh, struct mlx5_mr *mr)
{
- struct mlx5_priv *priv = dev->data->dev_private;
unsigned int n;
- DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
- dev->data->port_id, (void *)mr);
+ DRV_LOG(DEBUG, "device %s inserting MR(%p) to global cache",
+ sh->ibdev_name, (void *)mr);
for (n = 0; n < mr->ms_bmp_n; ) {
struct mlx5_mr_cache entry;
n = mr_find_next_chunk(mr, &entry, n);
if (!entry.end)
break;
- if (mr_btree_insert(&priv->sh->mr.cache, &entry) < 0) {
+ if (mr_btree_insert(&sh->mr.cache, &entry) < 0) {
/*
* Overflowed, but the global table cannot be expanded
* because of deadlock.
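 * (Presumably: expanding the table would require rte_realloc(), and
 * freeing memory while holding the MR lock can invoke the memory free
 * callback, which takes the same lock; see the rte_free() note below.)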
/**
* Look up address in the original global MR list.
*
- * @param dev
- * Pointer to Ethernet device.
+ * @param sh
+ * Pointer to Ethernet device shared context.
* @param[out] entry
* Pointer to returning MR cache entry. If no match, this will not be updated.
* @param addr
* Found MR on match, NULL otherwise.
*/
static struct mlx5_mr *
-mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+mr_lookup_dev_list(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
/* Iterate all the existing MRs. */
- LIST_FOREACH(mr, &priv->sh->mr.mr_list, mr) {
+ LIST_FOREACH(mr, &sh->mr.mr_list, mr) {
unsigned int n;
if (mr->ms_n == 0)
* Look up address on device.
*
- * @param dev
- *   Pointer to Ethernet device.
+ * @param sh
+ *   Pointer to Ethernet device shared context.
* @param[out] entry
* Pointer to returning MR cache entry. If no match, this will not be updated.
* @param addr
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
static uint32_t
-mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+mr_lookup_dev(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
uint16_t idx;
uint32_t lkey = UINT32_MAX;
struct mlx5_mr *mr;
*entry = (*sh->mr.cache.table)[idx];
} else {
/* Falling back to the slowest path. */
- mr = mr_lookup_dev_list(dev, entry, addr);
+ mr = mr_lookup_dev_list(sh, entry, addr);
if (mr != NULL)
lkey = entry->lkey;
}
- assert(lkey == UINT32_MAX || (addr >= entry->start &&
- addr < entry->end));
+ MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
return lkey;
}
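/*
 * Usage sketch (illustrative): mr_lookup_dev() expects sh->mr.rwlock to be
 * held by the caller, as the call sites below do:
 *
 *   rte_rwlock_read_lock(&sh->mr.rwlock);
 *   lkey = mr_lookup_dev(sh, &entry, addr);
 *   rte_rwlock_read_unlock(&sh->mr.rwlock);
 */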
struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
/* Must be called from the primary process. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/*
* MR can't be freed with holding the lock because rte_free() could call
* memory free callback function. This will be a deadlock situation.
}
rte_rwlock_read_lock(&priv->sh->mr.rwlock);
/* Fill in output data. */
- mr_lookup_dev(dev, entry, addr);
+ mr_lookup_dev(priv->sh, entry, addr);
/* Lookup can't fail. */
- assert(entry->lkey != UINT32_MAX);
+ MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_read_unlock(&priv->sh->mr.rwlock);
DEBUG("port %u MR CREATED by primary process for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_dev_config *config = &priv->config;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
struct mlx5_mr *mr = NULL;
}
alloc_resources:
/* Addresses must be page-aligned. */
- assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
- assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
msl = data.msl;
ms = rte_mem_virt2memseg((void *)data.start, msl);
len = data.end - data.start;
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
/* Number of memsegs in the range. */
ms_n = len / msl->page_sz;
DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
* just single page. If not, go on with the big chunk atomically from
* here.
*/
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_lock();
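/*
 * The memory hotplug read lock is held from here until the new MR is
 * registered and inserted into the global cache, so the memseg lists
 * walked below cannot change underneath.
 */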
data_re = data;
if (len > msl->page_sz &&
!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
*/
data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
data.end = data.start + msl->page_sz;
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
mr_free(mr);
goto alloc_resources;
}
- assert(data.msl == data_re.msl);
+ MLX5_ASSERT(data.msl == data_re.msl);
rte_rwlock_write_lock(&sh->mr.rwlock);
/*
* Check the address is really missing. If other thread already created
* one or it is not found due to overflow, abort and return.
*/
- if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ if (mr_lookup_dev(sh, entry, addr) != UINT32_MAX) {
/*
* Insert to the global cache table. It may fail due to
* low-on-memory. Then, this entry will have to be searched
DEBUG("port %u found MR for %p on final lookup, abort",
dev->data->port_id, (void *)addr);
rte_rwlock_write_unlock(&sh->mr.rwlock);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
/*
* Must be unlocked before calling rte_free() because
* mlx5_mr_mem_event_free_cb() can be called inside.
memset(&ret, 0, sizeof(ret));
start = data_re.start + n * msl->page_sz;
/* Exclude memsegs already registered by other MRs. */
- if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ if (mr_lookup_dev(sh, &ret, start) == UINT32_MAX) {
/*
* Start from the first unregistered memseg in the
* extended range.
}
len = data.end - data.start;
mr->ms_bmp_n = len / msl->page_sz;
- assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
/*
* Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
* called with holding the memory lock because it doesn't use
* through mlx5_alloc_verbs_buf().
*/
mr->ibv_mr = mlx5_glue->reg_mr(sh->pd, (void *)data.start, len,
- IBV_ACCESS_LOCAL_WRITE);
+ IBV_ACCESS_LOCAL_WRITE |
+ IBV_ACCESS_RELAXED_ORDERING);
if (mr->ibv_mr == NULL) {
DEBUG("port %u fail to create a verbs MR for address (%p)",
dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
goto err_mrlock;
}
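/*
 * IBV_ACCESS_RELAXED_ORDERING is a performance hint: it allows the device
 * to use relaxed PCIe transaction ordering for DMA on this MR. It does not
 * change which accesses (local write here) are permitted.
 */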
- assert((uintptr_t)mr->ibv_mr->addr == data.start);
- assert(mr->ibv_mr->length == len);
+ MLX5_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
+ MLX5_ASSERT(mr->ibv_mr->length == len);
LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
DEBUG("port %u MR CREATED (%p) for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
/* Insert to the global cache table. */
- mr_insert_dev_cache(dev, mr);
+ mr_insert_dev_cache(sh, mr);
/* Fill in output data. */
- mr_lookup_dev(dev, entry, addr);
+ mr_lookup_dev(sh, entry, addr);
/* Lookup can't fail. */
- assert(entry->lkey != UINT32_MAX);
+ MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_write_unlock(&sh->mr.rwlock);
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
return entry->lkey;
err_mrlock:
rte_rwlock_write_unlock(&sh->mr.rwlock);
err_memlock:
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_mcfg_mem_read_unlock();
err_nolock:
/*
* In case of error, as this can be called in a datapath, a warning
/**
* Rebuild the global B-tree cache of device from the original MR list.
*
- * @param dev
- * Pointer to Ethernet device.
+ * @param sh
+ * Pointer to Ethernet device shared context.
*/
static void
-mr_rebuild_dev_cache(struct rte_eth_dev *dev)
+mr_rebuild_dev_cache(struct mlx5_ibv_shared *sh)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_mr *mr;
- DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
+ DRV_LOG(DEBUG, "device %s rebuild dev cache[]", sh->ibdev_name);
/* Flush cache to rebuild. */
sh->mr.cache.len = 1;
sh->mr.cache.overflow = 0;
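/*
 * Keeping len at 1 (not 0) preserves the reserved first entry that
 * mr_btree_lookup() asserts on (start == 0, lkey == UINT32_MAX).
 */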
/* Iterate all the existing MRs. */
LIST_FOREACH(mr, &sh->mr.mr_list, mr)
- if (mr_insert_dev_cache(dev, mr) < 0)
+ if (mr_insert_dev_cache(sh, mr) < 0)
return;
}
* The global cache must be rebuilt if there's any change and this event has to
* be propagated to dataplane threads to flush the local caches.
*
- * @param dev
- * Pointer to Ethernet device.
+ * @param sh
+ *   Pointer to Ethernet device shared context.
* @param addr
* Address of freed memory.
* @param len
* Size of freed memory.
*/
static void
-mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
+ const void *addr, size_t len)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
const struct rte_memseg_list *msl;
struct mlx5_mr *mr;
int ms_n;
int i;
int rebuild = 0;
- DEBUG("port %u free callback: addr=%p, len=%zu",
- dev->data->port_id, addr, len);
+ DEBUG("device %s free callback: addr=%p, len=%zu",
+ sh->ibdev_name, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
- assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
- assert(len == RTE_ALIGN(len, msl->page_sz));
+ MLX5_ASSERT((uintptr_t)addr ==
+ RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
ms_n = len / msl->page_sz;
rte_rwlock_write_lock(&sh->mr.rwlock);
/* Clear bits of freed memsegs from MR. */
/* Find MR having this memseg. */
start = (uintptr_t)addr + i * msl->page_sz;
- mr = mr_lookup_dev_list(dev, &entry, start);
+ mr = mr_lookup_dev_list(sh, &entry, start);
if (mr == NULL)
continue;
- assert(mr->msl); /* Can't be external memory. */
+ MLX5_ASSERT(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
- assert(ms != NULL);
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX5_ASSERT(ms != NULL);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
pos = ms_idx - mr->ms_base_idx;
- assert(rte_bitmap_get(mr->ms_bmp, pos));
- assert(pos < mr->ms_bmp_n);
- DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
- dev->data->port_id, (void *)mr, pos, (void *)start);
+ MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+ MLX5_ASSERT(pos < mr->ms_bmp_n);
+ DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+ sh->ibdev_name, (void *)mr, pos, (void *)start);
rte_bitmap_clear(mr->ms_bmp, pos);
if (--mr->ms_n == 0) {
LIST_REMOVE(mr, mr);
LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
- DEBUG("port %u remove MR(%p) from list",
- dev->data->port_id, (void *)mr);
+ DEBUG("device %s remove MR(%p) from list",
+ sh->ibdev_name, (void *)mr);
}
/*
 * MR is fragmented or will be freed. The global cache must be
rebuild = 1;
}
if (rebuild) {
- mr_rebuild_dev_cache(dev);
+ mr_rebuild_dev_cache(sh);
/*
* Flush local caches by propagating invalidation across cores.
* rte_smp_wmb() is enough to synchronize this event. If one of
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct mlx5_priv *priv;
+ struct mlx5_ibv_shared *sh;
struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
/* Must be called from the primary process. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
/* Iterate all the existing mlx5 devices. */
- LIST_FOREACH(priv, dev_list, mem_event_cb)
- mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+ LIST_FOREACH(sh, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(sh, addr, len);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
if (mr == NULL)
return NULL;
mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
- IBV_ACCESS_LOCAL_WRITE);
+ IBV_ACCESS_LOCAL_WRITE |
+ IBV_ACCESS_RELAXED_ORDERING);
if (mr->ibv_mr == NULL) {
DRV_LOG(WARNING,
"port %u fail to create a verbs MR for address (%p)",
struct mlx5_mr_cache entry;
uint32_t lkey;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* If already registered, it should return. */
rte_rwlock_read_lock(&sh->mr.rwlock);
- lkey = mr_lookup_dev(dev, &entry, addr);
+ lkey = mr_lookup_dev(sh, &entry, addr);
rte_rwlock_read_unlock(&sh->mr.rwlock);
if (lkey != UINT32_MAX)
return;
rte_rwlock_write_lock(&sh->mr.rwlock);
LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
/* Insert to the global cache table. */
- mr_insert_dev_cache(dev, mr);
+ mr_insert_dev_cache(sh, mr);
rte_rwlock_write_unlock(&sh->mr.rwlock);
/* Insert to the local cache table */
mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
rte_rwlock_write_lock(&sh->mr.rwlock);
LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
/* Insert to the global cache table. */
- mr_insert_dev_cache(dev, mr);
+ mr_insert_dev_cache(sh, mr);
rte_rwlock_write_unlock(&sh->mr.rwlock);
return 0;
}
priv = dev->data->dev_private;
sh = priv->sh;
rte_rwlock_read_lock(&sh->mr.rwlock);
- mr = mr_lookup_dev_list(dev, &entry, (uintptr_t)addr);
+ mr = mr_lookup_dev_list(sh, &entry, (uintptr_t)addr);
if (!mr) {
rte_rwlock_read_unlock(&sh->mr.rwlock);
DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
(void *)mr);
- mr_rebuild_dev_cache(dev);
+ mr_rebuild_dev_cache(sh);
/*
* Flush local caches by propagating invalidation across cores.
* rte_smp_wmb() is enough to synchronize this event. If one of
void
mlx5_mr_dump_dev(struct mlx5_ibv_shared *sh __rte_unused)
{
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX5_DEBUG
struct mlx5_mr *mr;
int mr_n = 0;
int chunk_n = 0;
{
struct mlx5_mr *mr_next;
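/*
 * rte_log_can_log() checks both the global and the mlx5_logtype log level,
 * so the (potentially large) MR dump is produced only when DEBUG messages
 * would actually be emitted.
 */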
- if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+ if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG))
mlx5_mr_dump_dev(sh);
rte_rwlock_write_lock(&sh->mr.rwlock);
/* Detach from MR list and move to free list. */