- DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
- dev->data->port_id, (void *)mr);
+ DRV_LOG(DEBUG, "device %s inserting MR(%p) to global cache",
+ sh->ibdev_name, (void *)mr);
 * has to be searched by traversing the original MR list instead, which
 * is a very slow path. Otherwise, the global cache is all-inclusive.
*/
- if (!unlikely(priv->mr.cache.overflow)) {
- lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (!unlikely(sh->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&sh->mr.cache, &idx, addr);
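For context on the hunk above: the lookup consults the global B-tree cache first and falls back to walking the raw MR list only when the cache has overflowed. A minimal, self-contained sketch of that pattern, with a sorted array standing in for the driver's B-tree (struct mr_cache and cache_lookup() are illustrative stand-ins, not the driver's real types):

    #include <stdint.h>

    /* Illustrative cache entry: [start, end) -> lkey. */
    struct cache_entry { uintptr_t start, end; uint32_t lkey; };

    struct mr_cache {
        struct cache_entry tbl[64];
        int len;      /* entries in use */
        int overflow; /* set when an insert did not fit */
    };

    /* Fast path first; UINT32_MAX means "not found, take the slow
     * path through the original MR list". */
    static uint32_t
    cache_lookup(const struct mr_cache *c, uintptr_t addr)
    {
        if (!c->overflow)
            for (int i = 0; i < c->len; ++i)
                if (addr >= c->tbl[i].start && addr < c->tbl[i].end)
                    return c->tbl[i].lkey;
        return UINT32_MAX;
    }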
 * MR can't be freed while holding the lock because rte_free() could call
 * the memory free callback function, which would deadlock.
*/
- free_list = priv->mr.mr_free_list;
- LIST_INIT(&priv->mr.mr_free_list);
- rte_rwlock_write_unlock(&priv->mr.rwlock);
+ free_list = sh->mr.mr_free_list;
+ LIST_INIT(&sh->mr.mr_free_list);
+ rte_rwlock_write_unlock(&sh->mr.rwlock);
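The hunk above keeps the driver's deadlock-avoidance pattern intact: the free list is detached while the write lock is held, but the entries are released only after unlocking. A compilable sketch of that pattern, with pthread primitives and free() standing in for the DPDK rwlock and rte_free():

    #include <pthread.h>
    #include <stdlib.h>

    struct mr { struct mr *next; };

    static pthread_rwlock_t mr_rwlock = PTHREAD_RWLOCK_INITIALIZER;
    static struct mr *mr_free_list;

    static void
    release_detached_mrs(void)
    {
        struct mr *head;

        pthread_rwlock_wrlock(&mr_rwlock);
        head = mr_free_list;  /* detach the whole list ... */
        mr_free_list = NULL;  /* ... and reinitialize the head */
        pthread_rwlock_unlock(&mr_rwlock);
        /* Free only after unlocking: free() (rte_free() in the driver)
         * can re-enter the memory-event callback, which takes this lock. */
        while (head != NULL) {
            struct mr *next = head->next;
            free(head);
            head = next;
        }
    }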
DEBUG("port %u MR CREATED by primary process for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
dev->data->port_id, (void *)addr,
DEBUG("port %u MR CREATED by primary process for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
dev->data->port_id, (void *)addr,
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
struct mlx5_mr *mr = NULL;
dev->data->port_id, (void *)addr);
/*
* Release detached MRs if any. This can't be called with holding either
 * memory_hotplug_lock or sh->mr.rwlock. MRs on the free list have
 * been detached by the memory free event but couldn't be released
* inside the callback due to deadlock. As a result, releasing resources
* is quite opportunistic.
*/
/*
* If enabled, find out a contiguous virtual address chunk in use, to
* which the given address belongs, in order to register maximum range.
data_re = data;
if (len > msl->page_sz &&
!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
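rte_memseg_contig_walk() is the EAL iterator used here: its callback is invoked once per contiguous run of memsegs and can stop the walk by returning nonzero. A sketch of a callback in the spirit of the driver's mr_find_contig_memsegs_cb (struct contig_search is an illustrative stand-in for the driver's walk context):

    #include <stdint.h>
    #include <rte_memory.h>

    struct contig_search {
        uintptr_t addr;       /* address to locate */
        uintptr_t start, end; /* enclosing contiguous chunk, set on match */
        const struct rte_memseg_list *msl;
    };

    static int
    find_contig_cb(const struct rte_memseg_list *msl,
                   const struct rte_memseg *ms, size_t len, void *arg)
    {
        struct contig_search *s = arg;
        uintptr_t start = ms->addr_64;

        if (s->addr < start || s->addr >= start + len)
            return 0; /* not this chunk, keep walking */
        s->start = start;
        s->end = start + len;
        s->msl = msl;
        return 1; /* stop the walk */
    }

    /* Usage: found = rte_memseg_contig_walk(find_contig_cb, &s) > 0; */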
*/
data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
data.end = data.start + msl->page_sz;
/*
* Check the address is really missing. If other thread already created
* one or it is not found due to overflow, abort and return.
*/
/*
* Insert to the global cache table. It may fail due to
* low-on-memory. Then, this entry will have to be searched
* here again.
*/
DEBUG("port %u found MR for %p on final lookup, abort",
dev->data->port_id, (void *)addr);
DEBUG("port %u found MR for %p on final lookup, abort",
dev->data->port_id, (void *)addr);
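The "really missing" check above is the second half of a double-checked registration: the costly verbs MR is created without the lock held, so a racing thread may have registered the same range first. A simplified, self-contained sketch, with a plain list and pthread lock standing in for the driver's B-tree and rte_rwlock (note the free happens only after unlocking, per the comment below):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct mr { uintptr_t start, end; uint32_t lkey; struct mr *next; };

    static pthread_rwlock_t mr_rwlock = PTHREAD_RWLOCK_INITIALIZER;
    static struct mr *mr_list;

    static uint32_t
    register_mr(struct mr *new_mr)
    {
        struct mr *m;
        uint32_t lkey;

        pthread_rwlock_wrlock(&mr_rwlock);
        for (m = mr_list; m != NULL; m = m->next) {
            if (new_mr->start >= m->start && new_mr->start < m->end) {
                lkey = m->lkey;
                pthread_rwlock_unlock(&mr_rwlock);
                free(new_mr); /* freed only after unlocking */
                return lkey;  /* lost the race; reuse the winner's MR */
            }
        }
        new_mr->next = mr_list;
        mr_list = new_mr;
        lkey = new_mr->lkey;
        pthread_rwlock_unlock(&mr_rwlock);
        return lkey;
    }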
/*
* Must be unlocked before calling rte_free() because
* mlx5_mr_mem_event_free_cb() can be called inside.
memset(&ret, 0, sizeof(ret));
start = data_re.start + n * msl->page_sz;
/* Exclude memsegs already registered by other MRs. */
/*
 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
 * called while holding the memory lock because it doesn't use
 * mlx5_alloc_buf_extern(), which eventually calls rte_malloc_socket()
* through mlx5_alloc_verbs_buf().
*/
- mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
+ mr->ibv_mr = mlx5_glue->reg_mr(sh->pd, (void *)data.start, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
DEBUG("port %u fail to create a verbs MR for address (%p)",
DEBUG("port %u MR CREATED (%p) for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
DEBUG("port %u MR CREATED (%p) for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
/* Insert to the global cache table. */
- LIST_FOREACH(mr, &priv->mr.mr_list, mr)
- if (mr_insert_dev_cache(dev, mr) < 0)
+ LIST_FOREACH(mr, &sh->mr.mr_list, mr)
+ if (mr_insert_dev_cache(sh, mr) < 0)
* The global cache must be rebuilt if there's any change and this event has to
* be propagated to dataplane threads to flush the local caches.
*
-mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
+ const void *addr, size_t len)
- DEBUG("port %u free callback: addr=%p, len=%zu",
- dev->data->port_id, addr, len);
+ DEBUG("device %s free callback: addr=%p, len=%zu",
+ sh->ibdev_name, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
assert(len == RTE_ALIGN(len, msl->page_sz));
ms_n = len / msl->page_sz;
/* Clear bits of freed memsegs from MR. */
for (i = 0; i < ms_n; ++i) {
const struct rte_memseg *ms;
pos = ms_idx - mr->ms_base_idx;
assert(rte_bitmap_get(mr->ms_bmp, pos));
assert(pos < mr->ms_bmp_n);
- DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
- dev->data->port_id, (void *)mr, pos, (void *)start);
+ DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+ sh->ibdev_name, (void *)mr, pos, (void *)start);
- LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
- DEBUG("port %u remove MR(%p) from list",
- dev->data->port_id, (void *)mr);
+ LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
+ DEBUG("device %s remove MR(%p) from list",
+ sh->ibdev_name, (void *)mr);
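Each MR tracks which of its memsegs are still live in a bitmap; the free callback clears one bit per freed page and, once the bitmap empties, moves the MR to the free list as in the hunk above. A self-contained sketch with a 64-bit word standing in for the driver's rte_bitmap:

    #include <assert.h>
    #include <stdint.h>

    struct mr_segs {
        uint32_t ms_base_idx; /* memseg index of the first tracked page */
        uint32_t ms_bmp_n;    /* number of tracked memsegs (<= 64 here) */
        uint32_t ms_n;        /* bits still set */
        uint64_t ms_bmp;      /* one bit per memseg; set = registered */
    };

    /* Returns 1 when the MR has no live memsegs left and can be
     * detached (moved to the free list). */
    static int
    mr_clear_memseg(struct mr_segs *mr, uint32_t ms_idx)
    {
        uint32_t pos = ms_idx - mr->ms_base_idx;

        assert(pos < mr->ms_bmp_n);
        assert(mr->ms_bmp & (UINT64_C(1) << pos));
        mr->ms_bmp &= ~(UINT64_C(1) << pos);
        return --mr->ms_n == 0;
    }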
/*
* Flush local caches by propagating invalidation across cores.
* rte_smp_wmb() is enough to synchronize this event. If one of
 * generation below) will be guaranteed to be seen by other cores
 * before they see the newly allocated memory.
*/
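The comment above describes a generation-counter scheme: the writer bumps a device-wide generation number and issues rte_smp_wmb() so the new value is visible before the freed memory can be reallocated; datapath threads compare their snapshot against it and flush their local caches on mismatch. A sketch of both sides (dev_gen and struct mr_ctrl here are illustrative names, though the driver uses the same idea):

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_branch_prediction.h>

    static volatile uint32_t dev_gen; /* bumped whenever MRs are removed */

    struct mr_ctrl {
        uint32_t cur_gen; /* generation the local cache was built against */
        /* ... per-queue lookup table ... */
    };

    /* Writer (control path): make every datapath cache stale. */
    static void
    flush_all_caches(void)
    {
        dev_gen++;
        rte_smp_wmb(); /* publish before freed memory can be reallocated */
    }

    /* Reader (datapath): drop the local cache on a generation change. */
    static void
    mr_ctrl_check(struct mr_ctrl *ctrl)
    {
        if (unlikely(ctrl->cur_gen != dev_gen)) {
            /* ... reset the per-queue lookup table here ... */
            ctrl->cur_gen = dev_gen;
        }
    }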
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
/* Must be called from the primary process. */
case RTE_MEM_EVENT_FREE:
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
/* Iterate all the existing mlx5 devices. */
- LIST_FOREACH(priv, dev_list, mem_event_cb)
- mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+ LIST_FOREACH(sh, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(sh, addr, len);
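The callback above is registered once with the EAL and fans out to every shared IB context on a free event. rte_mem_event_callback_register() is the real EAL API; the list handling below is a simplified stand-in for mlx5_shared_data's callback list:

    #include <sys/queue.h>
    #include <rte_common.h>
    #include <rte_memory.h>

    struct ib_shared { LIST_ENTRY(ib_shared) next; };
    static LIST_HEAD(, ib_shared) sh_list = LIST_HEAD_INITIALIZER(sh_list);

    static void
    mem_event_cb(enum rte_mem_event event_type, const void *addr,
                 size_t len, void *arg __rte_unused)
    {
        struct ib_shared *sh;

        if (event_type != RTE_MEM_EVENT_FREE)
            return;
        LIST_FOREACH(sh, &sh_list, next) {
            /* per-context free handling: clear bitmaps, detach MRs */
            (void)sh; (void)addr; (void)len;
        }
    }

    /* Registered once from the primary process, e.g.:
     * rte_mem_event_callback_register("SKETCH_MEM_EVENT_CB",
     *                                 mem_event_cb, NULL);
     */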
struct mlx5_mr_cache *entry, uintptr_t addr)
{
struct mlx5_priv *priv = dev->data->dev_private;
if (unlikely(bt->len == bt->size))
mr_btree_expand(bt, bt->size << 1);
/* Look up in the global cache. */
- rte_rwlock_read_lock(&priv->mr.rwlock);
- lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ rte_rwlock_read_lock(&sh->mr.rwlock);
+ lkey = mr_btree_lookup(&sh->mr.cache, &idx, addr);
/*
* Update local cache. Even if it fails, return the found entry
* to update top-half cache. Next time, this entry will be found
/* First time to see the address? Create a new MR. */
lkey = mlx5_mr_create(dev, entry, addr);
/*
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx5_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* If already registered, it should return. */
- rte_rwlock_read_lock(&priv->mr.rwlock);
- lkey = mr_lookup_dev(dev, &entry, addr);
- rte_rwlock_read_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_lock(&sh->mr.rwlock);
+ lkey = mr_lookup_dev(sh, &entry, addr);
+ rte_rwlock_read_unlock(&sh->mr.rwlock);
- rte_rwlock_write_lock(&priv->mr.rwlock);
- LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ rte_rwlock_write_lock(&sh->mr.rwlock);
+ LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
- mr_insert_dev_cache(dev, mr);
- rte_rwlock_write_unlock(&priv->mr.rwlock);
+ mr_insert_dev_cache(sh, mr);
+ rte_rwlock_write_unlock(&sh->mr.rwlock);
/* Insert to the local cache table */
mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
}
- /**
- * We really need to iterate all devices regardless of
- * their owner.
- */
- RTE_DEV_FOREACH(dev, "class=eth", &it)
- if (dev == &pdev->device)
- return it.class_device;
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pdev->device)
+ return &rte_eth_devices[port_id];
- rte_rwlock_write_lock(&priv->mr.rwlock);
- LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ sh = priv->sh;
+ rte_rwlock_write_lock(&sh->mr.rwlock);
+ LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
- mr_insert_dev_cache(dev, mr);
- rte_rwlock_write_unlock(&priv->mr.rwlock);
+ mr_insert_dev_cache(sh, mr);
+ rte_rwlock_write_unlock(&sh->mr.rwlock);
- rte_rwlock_read_lock(&priv->mr.rwlock);
- mr = mr_lookup_dev_list(dev, &entry, (uintptr_t)addr);
+ sh = priv->sh;
+ rte_rwlock_read_lock(&sh->mr.rwlock);
+ mr = mr_lookup_dev_list(sh, &entry, (uintptr_t)addr);
DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
"to PCI device %p", (uintptr_t)addr,
(void *)pdev);
DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
"to PCI device %p", (uintptr_t)addr,
(void *)pdev);
/*
* Flush local caches by propagating invalidation across cores.
* rte_smp_wmb() is enough to synchronize this event. If one of
 * generation below) will be guaranteed to be seen by other cores
 * before they see the newly allocated memory.
*/
- DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
- dev->data->port_id, mr_n++,
+ DEBUG("device %s MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ sh->ibdev_name, mr_n++,
- DEBUG("port %u dumping global cache", dev->data->port_id);
- mlx5_mr_btree_dump(&priv->mr.cache);
- rte_rwlock_read_unlock(&priv->mr.rwlock);
+ DEBUG("device %s dumping global cache", sh->ibdev_name);
+ mlx5_mr_btree_dump(&sh->mr.cache);
+ rte_rwlock_read_unlock(&sh->mr.rwlock);
- /* Remove from memory callback device list. */
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_REMOVE(priv, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
while (mr_next != NULL) {
struct mlx5_mr *mr = mr_next;
mr_next = LIST_NEXT(mr, mr);
LIST_REMOVE(mr, mr);