#include <rte_rwlock.h>
#include "mlx5_glue.h"
+#include "mlx5_common.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
+#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
struct mlx5_mempool_mr *mrs;
/** Number of memory regions. */
unsigned int mrs_n;
+ /** Whether the MRs were created for external pinned memory. */
+ bool is_extmem;
};
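+/**
+ * Shared-info free callback of an MPRQ buffer
+ * (matches rte_mbuf_extbuf_free_callback_t).
+ *
+ * @param addr
+ *   Buffer address, unused.
+ * @param opaque
+ *   Pointer to the MPRQ buffer holding the reference counter.
+ */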
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+ struct mlx5_mprq_buf *buf = opaque;
+
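+ /* A count of 1 means the PMD holds the only reference: recycle. */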
+ if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ rte_mempool_put(buf->mp, buf);
+ } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
+ __ATOMIC_RELAXED) == 0)) {
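+ /* The last mbuf reference is gone: reset the count and recycle. */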
+ __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_mempool_put(buf->mp, buf);
+ }
+}
+
/**
 * Expand B-tree table to a given size. Can't be called while holding
* memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
+static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
if (bt == NULL) {
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-uint32_t
+static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr)
{
return;
DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
dereg_mr_cb(&mr->pmd_mr);
- if (mr->ms_bmp != NULL)
- rte_bitmap_free(mr->ms_bmp);
+ rte_bitmap_free(mr->ms_bmp);
mlx5_free(mr);
}
 * list is in shared memory, the following LKey lookup should succeed unless the
* request fails.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param share_cache
* Pointer to a global shared MR cache.
* @param[out] entry
 * created. If it fails to create one, this will not be updated.
* @param addr
* Target virtual address to register.
- * @param mr_ext_memseg_en
- * Configurable flag about external memory segment enable or not.
*
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
static uint32_t
-mlx5_mr_create_secondary(void *pd __rte_unused,
- struct mlx5_mp_id *mp_id,
+mlx5_mr_create_secondary(struct mlx5_common_device *cdev,
struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en __rte_unused)
+ struct mr_cache_entry *entry, uintptr_t addr)
{
int ret;
- DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
- mp_id->port_id, (void *)addr);
- ret = mlx5_mp_req_mr_create(mp_id, addr);
+ DRV_LOG(DEBUG, "Requesting MR creation for address (%p)", (void *)addr);
+ ret = mlx5_mp_req_mr_create(cdev, addr);
if (ret) {
DRV_LOG(DEBUG, "Fail to request MR creation for address (%p)",
- (void *)addr);
+ (void *)addr);
return UINT32_MAX;
}
rte_rwlock_read_lock(&share_cache->rwlock);
MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_read_unlock(&share_cache->rwlock);
DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
- (void *)addr, entry->start, entry->end, entry->lkey);
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
+ (void *)addr, entry->start, entry->end, entry->lkey);
return entry->lkey;
}
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-uint32_t
+static uint32_t
mlx5_mr_create_primary(void *pd,
struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr,
* Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from both primary and secondary processes.
*
- * @param pd
- * Pointer to pd handle of a device (net, regex, vdpa,...).
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param share_cache
* Pointer to a global shared MR cache.
* @param[out] entry
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-static uint32_t
-mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
+uint32_t
+mlx5_mr_create(struct mlx5_common_device *cdev,
struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+ struct mr_cache_entry *entry, uintptr_t addr)
{
uint32_t ret = 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- ret = mlx5_mr_create_primary(pd, share_cache, entry,
- addr, mr_ext_memseg_en);
+ ret = mlx5_mr_create_primary(cdev->pd, share_cache, entry, addr,
+ cdev->config.mr_ext_memseg_en);
break;
case RTE_PROC_SECONDARY:
- ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
- addr, mr_ext_memseg_en);
+ ret = mlx5_mr_create_secondary(cdev, share_cache, entry, addr);
break;
default:
break;
* Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry into the local bottom-half cache table.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Pointer to per-queue MR control structure.
* @param[out] entry
* Searched LKey on success, UINT32_MAX on no match.
*/
static uint32_t
-mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
+ struct mr_cache_entry *entry, uintptr_t addr)
{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_common_device *cdev =
+ container_of(share_cache, struct mlx5_common_device, mr_scache);
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
uint32_t lkey;
uint16_t idx;
}
rte_rwlock_read_unlock(&share_cache->rwlock);
/* First time to see the address? Create a new MR. */
- lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
- mr_ext_memseg_en);
+ lkey = mlx5_mr_create(cdev, share_cache, entry, addr);
/*
* Update the local cache if successfully created a new global MR. Even
* if failed to create one, there's no action to take in this datapath
 * misses, search the global MR cache table and add the new entry to the
* per-queue local caches.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Pointer to per-queue MR control structure.
* @param addr
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
-uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- uintptr_t addr, unsigned int mr_ext_memseg_en)
+static uint32_t
+mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
uint32_t lkey;
uint16_t bh_idx = 0;
* and local cache_bh[] will be updated inside if possible.
* Top-half cache entry will also be updated.
*/
- lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
- repl, addr, mr_ext_memseg_en);
+ lkey = mr_lookup_caches(mr_ctrl, repl, addr);
if (unlikely(lkey == UINT32_MAX))
return UINT32_MAX;
}
}
/**
- * Release all the created MRs and resources on global MR cache of a device.
+ * Release all the created MRs and resources on a device's global MR cache
* list.
*
* @param share_cache
mlx5_mr_garbage_collect(share_cache);
}
+/**
+ * Initialize global MR cache of a device.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
+{
+ /* Set the reg_mr and dereg_mr callback functions. */
+ mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
+ &share_cache->dereg_mr_cb);
+ rte_rwlock_init(&share_cache->rwlock);
+ rte_rwlock_init(&share_cache->mprwlock);
+ share_cache->mp_cb_registered = 0;
+ /* Initialize B-tree and allocate memory for global MR cache table. */
+ return mlx5_mr_btree_init(&share_cache->cache,
+ MLX5_MR_BTREE_CACHE_N * 2, socket);
+}
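+
+/*
+ * Usage sketch (hypothetical caller; assumes the cache is embedded
+ * in the common device as cdev->mr_scache):
+ *
+ *	if (mlx5_mr_create_cache(&cdev->mr_scache, rte_socket_id()) < 0)
+ *		return -rte_errno;
+ */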
+
/**
* Flush all of the local cache entries.
*
/**
* Dump all the created MRs and the global cache entries.
*
- * @param sh
- * Pointer to Ethernet device shared context.
+ * @param share_cache
+ * Pointer to a global shared MR cache.
*/
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
unsigned int idx)
{
struct mlx5_range *ranges = opaque, *range = &ranges[idx];
+ uintptr_t start = (uintptr_t)memhdr->addr;
uint64_t page_size = rte_mem_page_size();
RTE_SET_USED(mp);
- range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
- range->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);
+ range->start = RTE_ALIGN_FLOOR(start, page_size);
+ range->end = RTE_ALIGN_CEIL(start + memhdr->len, page_size);
+}
+
+/**
+ * Collect page-aligned memory ranges of the mempool.
+ */
+static int
+mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
+ unsigned int *out_n)
+{
+ unsigned int n;
+
+ DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name);
+ n = mp->nb_mem_chunks;
+ *out = calloc(n, sizeof(**out));
+ if (*out == NULL)
+ return -1;
+ rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out);
+ *out_n = n;
+ return 0;
+}
+
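+/*
+ * Accumulator for mlx5_mempool_get_extmem_cb(): "heap" is kept sorted
+ * by range start address, "ret" latches the first allocation failure.
+ */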
+struct mlx5_mempool_get_extmem_data {
+ struct mlx5_range *heap;
+ unsigned int heap_size;
+ int ret;
+};
+
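+/**
+ * Record the page holding the buffer of one mempool object.
+ * Callback for rte_mempool_obj_iter(); pages already visited are skipped.
+ */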
+static void
+mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
+ void *obj, unsigned int obj_idx)
+{
+ struct mlx5_mempool_get_extmem_data *data = opaque;
+ struct rte_mbuf *mbuf = obj;
+ uintptr_t addr = (uintptr_t)mbuf->buf_addr;
+ struct mlx5_range *seg, *heap;
+ struct rte_memseg_list *msl;
+ size_t page_size;
+ uintptr_t page_start;
+ unsigned int pos = 0, len = data->heap_size, delta;
+
+ RTE_SET_USED(mp);
+ RTE_SET_USED(obj_idx);
+ if (data->ret < 0)
+ return;
+ /* Binary search for an already visited page. */
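+ /*
+ * The search settles on the rightmost range whose start address does
+ * not exceed addr (or on index 0 if none does); the check below tells
+ * whether addr falls inside that range.
+ */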
+ while (len > 1) {
+ delta = len / 2;
+ if (addr < data->heap[pos + delta].start) {
+ len = delta;
+ } else {
+ pos += delta;
+ len -= delta;
+ }
+ }
+ if (data->heap != NULL) {
+ seg = &data->heap[pos];
+ if (seg->start <= addr && addr < seg->end)
+ return;
+ }
+ /* Determine the page boundaries and remember them. */
+ heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
+ if (heap == NULL) {
+ free(data->heap);
+ data->heap = NULL;
+ data->ret = -1;
+ return;
+ }
+ data->heap = heap;
+ data->heap_size++;
+ seg = &heap[data->heap_size - 1];
+ msl = rte_mem_virt2memseg_list((void *)addr);
+ page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
+ page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
+ seg->start = page_start;
+ seg->end = page_start + page_size;
+ /* Maintain the heap order. */
+ qsort(data->heap, data->heap_size, sizeof(heap[0]),
+ mlx5_range_compare_start);
+}
+
+/**
+ * Recover pages of external memory as close as possible
+ * for a mempool created with RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF.
+ * Pages are stored in a heap for efficient search, because mbufs are many.
+ */
+static int
+mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
+ unsigned int *out_n)
+{
+ struct mlx5_mempool_get_extmem_data data;
+
+ DRV_LOG(DEBUG, "Recovering external pinned pages of mempool %s",
+ mp->name);
+ memset(&data, 0, sizeof(data));
+ rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
+ *out = data.heap;
+ *out_n = data.heap_size;
+ return data.ret;
}
/**
*
* @param[in] mp
* Analyzed mempool.
+ * @param[in] is_extmem
+ * Whether the pool contains only externally pinned buffers.
* @param[out] out
* Receives the ranges, caller must release it with free().
- * @param[out] ount_n
+ * @param[out] out_n
* Receives the number of @p out elements.
*
* @return
* 0 on success, (-1) on failure.
*/
static int
-mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
- unsigned int *out_n)
+mlx5_get_mempool_ranges(struct rte_mempool *mp, bool is_extmem,
+ struct mlx5_range **out, unsigned int *out_n)
{
struct mlx5_range *chunks;
- unsigned int chunks_n = mp->nb_mem_chunks, contig_n, i;
+ unsigned int chunks_n, contig_n, i;
+ int ret;
- /* Collect page-aligned memory ranges of the mempool. */
- chunks = calloc(sizeof(chunks[0]), chunks_n);
- if (chunks == NULL)
- return -1;
- rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);
+ /* Collect the memory ranges underlying the pool. */
+ ret = is_extmem ? mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
+ mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
+ if (ret < 0)
+ return ret;
/* Merge adjacent chunks and place them at the beginning. */
qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
contig_n = 1;
*
* @param[in] mp
* Mempool to analyze.
+ * @param[in] is_extmem
+ * Whether the pool contains only externally pinned buffers.
* @param[out] out
* Receives memory ranges to register, aligned to the system page size.
* The caller must release them with free().
* 0 on success, (-1) on failure.
*/
static int
-mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
- unsigned int *out_n, bool *share_hugepage)
+mlx5_mempool_reg_analyze(struct rte_mempool *mp, bool is_extmem,
+ struct mlx5_range **out, unsigned int *out_n,
+ bool *share_hugepage)
{
struct mlx5_range *ranges = NULL;
unsigned int i, ranges_n = 0;
struct rte_memseg_list *msl;
- if (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {
+ if (mlx5_get_mempool_ranges(mp, is_extmem, &ranges, &ranges_n) < 0) {
DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
mp->name);
return -1;
/** Create a registration object for the mempool. */
static struct mlx5_mempool_reg *
-mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
+mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
+ bool is_extmem)
{
struct mlx5_mempool_reg *mpr = NULL;
mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),
+ sizeof(struct mlx5_mempool_reg),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (mpr == NULL) {
DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
mp->name);
return NULL;
}
+ mpr->mrs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ mrs_n * sizeof(struct mlx5_mempool_mr),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (mpr->mrs == NULL) {
+ DRV_LOG(ERR, "Cannot allocate mempool %s registration MRs",
+ mp->name);
+ mlx5_free(mpr);
+ return NULL;
+ }
mpr->mp = mp;
- mpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);
mpr->mrs_n = mrs_n;
+ mpr->is_extmem = is_extmem;
return mpr;
}
* Destroy a mempool registration object.
*
* @param standalone
- * Whether @p mpr owns its MRs excludively, i.e. they are not shared.
+ * Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
*/
static void
mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
for (i = 0; i < mpr->mrs_n; i++)
share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
+ mlx5_free(mpr->mrs);
}
mlx5_free(mpr);
}
static int
mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
- void *pd, struct rte_mempool *mp)
+ void *pd, struct rte_mempool *mp,
+ bool is_extmem)
{
struct mlx5_range *ranges = NULL;
- struct mlx5_mempool_reg *mpr, *new_mpr;
+ struct mlx5_mempool_reg *mpr, *old_mpr, *new_mpr;
unsigned int i, ranges_n;
- bool share_hugepage;
+ bool share_hugepage, standalone = false;
int ret = -1;
/* Early check to avoid unnecessary creation of MRs. */
rte_rwlock_read_lock(&share_cache->rwlock);
- mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ old_mpr = mlx5_mempool_reg_lookup(share_cache, mp);
rte_rwlock_read_unlock(&share_cache->rwlock);
- if (mpr != NULL) {
+ if (old_mpr != NULL && (!is_extmem || old_mpr->is_extmem)) {
DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
mp->name, pd);
rte_errno = EEXIST;
goto exit;
}
- if (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,
+ if (mlx5_mempool_reg_analyze(mp, is_extmem, &ranges, &ranges_n,
&share_hugepage) < 0) {
DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
rte_errno = ENOMEM;
goto exit;
}
- new_mpr = mlx5_mempool_reg_create(mp, ranges_n);
+ new_mpr = mlx5_mempool_reg_create(mp, ranges_n, is_extmem);
if (new_mpr == NULL) {
DRV_LOG(ERR,
"Cannot create a registration object for mempool %s in PD %p",
/* Concurrent registration is not supposed to happen. */
rte_rwlock_write_lock(&share_cache->rwlock);
mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ if (mpr == old_mpr && old_mpr != NULL) {
+ LIST_REMOVE(old_mpr, next);
+ standalone = mlx5_mempool_reg_detach(mpr);
+ /* No need to flush the cache: old MRs cannot be in use. */
+ mpr = NULL;
+ }
if (mpr == NULL) {
mlx5_mempool_reg_attach(new_mpr);
- LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
- new_mpr, next);
+ LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
ret = 0;
}
rte_rwlock_write_unlock(&share_cache->rwlock);
mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
rte_errno = EEXIST;
goto exit;
+ } else if (old_mpr != NULL) {
+ DRV_LOG(DEBUG, "Mempool %s registration for PD %p updated for external memory",
+ mp->name, pd);
+ mlx5_mempool_reg_destroy(share_cache, old_mpr, standalone);
}
exit:
free(ranges);
}
static int
-mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,
- void *pd, struct rte_mempool *mp,
- struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp, bool is_extmem)
{
- if (mp_id == NULL) {
- rte_errno = EINVAL;
- return -1;
- }
- return mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);
+ return mlx5_mp_req_mempool_reg(cdev, mp, true, is_extmem);
}
/**
* Register the memory of a mempool in the protection domain.
*
- * @param share_cache
- * Shared MR cache of the protection domain.
- * @param pd
- * Protection domain object.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param mp
* Mempool to register.
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
*
* @return
* 0 on success, (-1) on failure and rte_errno is set.
*/
int
-mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
- struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp, bool is_extmem)
{
if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- return mlx5_mr_mempool_register_primary(share_cache, pd, mp);
+ return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
+ cdev->pd, mp,
+ is_extmem);
case RTE_PROC_SECONDARY:
- return mlx5_mr_mempool_register_secondary(share_cache, pd, mp,
- mp_id);
+ return mlx5_mr_mempool_register_secondary(cdev, mp, is_extmem);
default:
return -1;
}
}
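+
+/*
+ * Usage sketch (hypothetical start-up path; EEXIST only means the
+ * mempool was already registered):
+ *
+ *	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
+ *	    rte_errno != EEXIST)
+ *		return -rte_errno;
+ */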
static int
-mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,
- struct rte_mempool *mp,
- struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
{
- if (mp_id == NULL) {
- rte_errno = EINVAL;
- return -1;
- }
- return mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);
+ return mlx5_mp_req_mempool_reg(cdev, mp, false, false /* is_extmem */);
}
/**
* Unregister the memory of a mempool from the protection domain.
*
- * @param share_cache
- * Shared MR cache of the protection domain.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param mp
* Mempool to unregister.
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
*
* @return
* 0 on success, (-1) on failure and rte_errno is set.
*/
int
-mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
- struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
{
if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- return mlx5_mr_mempool_unregister_primary(share_cache, mp);
+ return mlx5_mr_mempool_unregister_primary(&cdev->mr_scache, mp);
case RTE_PROC_SECONDARY:
- return mlx5_mr_mempool_unregister_secondary(share_cache, mp,
- mp_id);
+ return mlx5_mr_mempool_unregister_secondary(cdev, mp);
default:
return -1;
}
for (i = 0; i < mpr->mrs_n; i++) {
const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
- uintptr_t mr_addr = (uintptr_t)mr->addr;
+ uintptr_t mr_start = (uintptr_t)mr->addr;
+ uintptr_t mr_end = mr_start + mr->len;
- if (mr_addr <= addr) {
+ if (mr_start <= addr && addr < mr_end) {
lkey = rte_cpu_to_be_32(mr->lkey);
- entry->start = mr_addr;
- entry->end = mr_addr + mr->len;
+ entry->start = mr_start;
+ entry->end = mr_end;
entry->lkey = lkey;
break;
}
/**
* Update bottom-half cache from the list of mempool registrations.
*
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Per-queue MR control handle.
* @param entry
* MR lkey on success, UINT32_MAX on failure.
*/
static uint32_t
-mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
+mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
struct mr_cache_entry *entry,
struct rte_mempool *mp, uintptr_t addr)
{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
struct mlx5_mempool_reg *mpr;
uint32_t lkey = UINT32_MAX;
return lkey;
}
+/**
+ * Populate cache with LKeys of all MRs used by the mempool.
+ * It is intended for Rx mempools that are registered in advance.
+ *
+ * @param mr_ctrl
+ * Per-queue MR control handle.
+ * @param mp
+ * Registered memory pool.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ struct mlx5_mempool_reg *mpr;
+ unsigned int i;
+
+ /*
+ * The registration stays valid after the lock is released, because
+ * this function is only called after the mempool has been registered.
+ */
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ if (mpr == NULL) {
+ DRV_LOG(ERR, "Mempool %s is not registered", mp->name);
+ rte_errno = ENOENT;
+ return -1;
+ }
+ for (i = 0; i < mpr->mrs_n; i++) {
+ struct mlx5_mempool_mr *mr = &mpr->mrs[i];
+ struct mr_cache_entry entry;
+ uint32_t lkey;
+ uint16_t idx;
+
+ lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
+ if (lkey != UINT32_MAX)
+ continue;
+ if (bt->len == bt->size)
+ mr_btree_expand(bt, bt->size << 1);
+ entry.start = (uintptr_t)mr->pmd_mr.addr;
+ entry.end = entry.start + mr->pmd_mr.len;
+ entry.lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
+ if (mr_btree_insert(bt, &entry) < 0) {
+ DRV_LOG(ERR, "Cannot insert cache entry for mempool %s MR %08x",
+ mp->name, entry.lkey);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ }
+ return 0;
+}
+
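+/*
+ * Usage sketch (hypothetical Rx setup path, "rxq" is illustrative;
+ * the mempool must have been registered beforehand):
+ *
+ *	mlx5_mr_mempool_register(cdev, mp, false);
+ *	if (mlx5_mr_mempool_populate_cache(&rxq->mr_ctrl, mp) < 0)
+ *		DRV_LOG(WARNING, "Cannot pre-populate LKey cache");
+ */
+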
/**
* Bottom-half lookup for the address from the mempool.
*
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Per-queue MR control handle.
* @param mp
* MR lkey on success, UINT32_MAX on failure.
*/
uint32_t
-mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
+mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mempool *mp, uintptr_t addr)
{
struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
if (likely(lkey != UINT32_MAX)) {
*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
} else {
- lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
- mp, addr);
+ lkey = mlx5_lookup_mempool_regs(mr_ctrl, repl, mp, addr);
/* Can only fail if the address is not from the mempool. */
if (unlikely(lkey == UINT32_MAX))
return UINT32_MAX;
mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
return lkey;
}
+
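+/**
+ * Bottom-half lookup of the LKey for an mbuf, resolving its underlying
+ * mempool first (MPRQ buffers and pinned external buffers included).
+ *
+ * @param mr_ctrl
+ *   Per-queue MR control handle.
+ * @param mb
+ *   Pointer to the mbuf whose buffer LKey is searched.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on failure.
+ */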
+uint32_t
+mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
+{
+ struct rte_mempool *mp;
+ struct mlx5_mprq_buf *buf;
+ uint32_t lkey;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_common_device *cdev =
+ container_of(share_cache, struct mlx5_common_device, mr_scache);
+ bool external, mprq, pinned = false;
+
+ /* Recover MPRQ mempool. */
+ external = RTE_MBUF_HAS_EXTBUF(mb);
+ if (external && mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+ mprq = true;
+ buf = mb->shinfo->fcb_opaque;
+ mp = buf->mp;
+ } else {
+ mprq = false;
+ mp = mlx5_mb2mp(mb);
+ pinned = rte_pktmbuf_priv_flags(mp) &
+ RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
+ }
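+ /*
+ * Direct mbufs, MPRQ buffers, and pinned external buffers all belong
+ * to a mempool, so a per-mempool registration lookup is tried first.
+ */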
+ if (!external || mprq || pinned) {
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
+ if (lkey != UINT32_MAX)
+ return lkey;
+ /* MPRQ is always registered. */
+ MLX5_ASSERT(!mprq);
+ }
+ /* Register pinned external memory if the mempool is not used for Rx. */
+ if (cdev->config.mr_mempool_reg_en && pinned) {
+ if (mlx5_mr_mempool_register(cdev, mp, true) < 0)
+ return UINT32_MAX;
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
+ MLX5_ASSERT(lkey != UINT32_MAX);
+ return lkey;
+ }
+ /* Fall back to the generic mechanism in corner cases. */
+ return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
+}