* Copyright 2016 6WIND S.A.
* Copyright 2020 Mellanox Technologies, Ltd
*/
+#include <stddef.h>
+
#include <rte_eal_memconfig.h>
+#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include "mlx5_glue.h"
+#include "mlx5_common.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
-#include "mlx5_common_utils.h"
+#include "mlx5_common_os.h"
+#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
struct mr_find_contig_memsegs_data {
const struct rte_memseg_list *msl;
};
+/* Virtual memory range. */
+struct mlx5_range {
+ uintptr_t start;
+ uintptr_t end;
+};
+
+/** Memory region for a mempool. */
+struct mlx5_mempool_mr {
+ struct mlx5_pmd_mr pmd_mr;
+ uint32_t refcnt; /**< Number of mempools sharing this MR. */
+};
+
+/* Mempool registration. */
+struct mlx5_mempool_reg {
+ LIST_ENTRY(mlx5_mempool_reg) next;
+ /** Registered mempool, used to designate registrations. */
+ struct rte_mempool *mp;
+ /** Memory regions for the address ranges of the mempool. */
+ struct mlx5_mempool_mr *mrs;
+ /** Number of memory regions. */
+ unsigned int mrs_n;
+ /** Whether the MRs were created for external pinned memory. */
+ bool is_extmem;
+};
+
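+/**
+ * Shared-info free callback for a Multi-Packet RQ (MPRQ) buffer: drop one
+ * reference and return the buffer to its mempool once the last reference
+ * is released.
+ */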
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+ struct mlx5_mprq_buf *buf = opaque;
+
+ if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ rte_mempool_put(buf->mp, buf);
+ } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
+ __ATOMIC_RELAXED) == 0)) {
+ __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_mempool_put(buf->mp, buf);
+ }
+}
+
/**
* Expand B-tree table to a given size. Can't be called with holding
* memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
+static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
if (bt == NULL) {
#endif
}
+/**
+ * Initialize per-queue MR control descriptor.
+ *
+ * @param mr_ctrl
+ * Pointer to MR control structure.
+ * @param dev_gen_ptr
+ * Pointer to generation number of global cache.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
+ int socket)
+{
+ if (mr_ctrl == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Save pointer of global generation number to check memory event. */
+ mr_ctrl->dev_gen_ptr = dev_gen_ptr;
+ /* Initialize B-tree and allocate memory for bottom-half cache table. */
+ return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
+ socket);
+}
+
/**
* Find virtually contiguous memory chunk in a given MR.
*
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-uint32_t
+static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr)
{
return;
DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
dereg_mr_cb(&mr->pmd_mr);
- if (mr->ms_bmp != NULL)
- rte_bitmap_free(mr->ms_bmp);
+ rte_bitmap_free(mr->ms_bmp);
mlx5_free(mr);
}
* list is on the shared memory, following LKey lookup should succeed unless the
* request fails.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param share_cache
* Pointer to a global shared MR cache.
* @param[out] entry
* created. If failed to create one, this will not be updated.
* @param addr
* Target virtual address to register.
- * @param mr_ext_memseg_en
- * Configurable flag about external memory segment enable or not.
*
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
static uint32_t
-mlx5_mr_create_secondary(void *pd __rte_unused,
- struct mlx5_mp_id *mp_id,
+mlx5_mr_create_secondary(struct mlx5_common_device *cdev,
struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en __rte_unused)
+ struct mr_cache_entry *entry, uintptr_t addr)
{
int ret;
- DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
- mp_id->port_id, (void *)addr);
- ret = mlx5_mp_req_mr_create(mp_id, addr);
+ DRV_LOG(DEBUG, "Requesting MR creation for address (%p)", (void *)addr);
+ ret = mlx5_mp_req_mr_create(cdev, addr);
if (ret) {
DRV_LOG(DEBUG, "Fail to request MR creation for address (%p)",
- (void *)addr);
+ (void *)addr);
return UINT32_MAX;
}
rte_rwlock_read_lock(&share_cache->rwlock);
MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_read_unlock(&share_cache->rwlock);
DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
- (void *)addr, entry->start, entry->end, entry->lkey);
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
+ (void *)addr, entry->start, entry->end, entry->lkey);
return entry->lkey;
}
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-uint32_t
+static uint32_t
mlx5_mr_create_primary(void *pd,
struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr,
* Create a new global Memory Region (MR) for a missing virtual address.
* This can be called from primary and secondary process.
*
- * @param pd
- * Pointer to pd handle of a device (net, regex, vdpa,...).
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param share_cache
* Pointer to a global shared MR cache.
* @param[out] entry
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-static uint32_t
-mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
+uint32_t
+mlx5_mr_create(struct mlx5_common_device *cdev,
struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+ struct mr_cache_entry *entry, uintptr_t addr)
{
uint32_t ret = 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- ret = mlx5_mr_create_primary(pd, share_cache, entry,
- addr, mr_ext_memseg_en);
+ ret = mlx5_mr_create_primary(cdev->pd, share_cache, entry, addr,
+ cdev->config.mr_ext_memseg_en);
break;
case RTE_PROC_SECONDARY:
- ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
- addr, mr_ext_memseg_en);
+ ret = mlx5_mr_create_secondary(cdev, share_cache, entry, addr);
break;
default:
break;
* Look up address in the global MR cache table. If not found, create a new MR.
* Insert the found/created entry to local bottom-half cache table.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Pointer to per-queue MR control structure.
* @param[out] entry
* Searched LKey on success, UINT32_MAX on no match.
*/
static uint32_t
-mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
+ struct mr_cache_entry *entry, uintptr_t addr)
{
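+ /*
+ * mr_ctrl->dev_gen_ptr refers to share_cache->dev_gen, so the owning
+ * shared cache and common device are recovered with container_of().
+ */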
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_common_device *cdev =
+ container_of(share_cache, struct mlx5_common_device, mr_scache);
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
uint32_t lkey;
uint16_t idx;
}
rte_rwlock_read_unlock(&share_cache->rwlock);
/* First time to see the address? Create a new MR. */
- lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
- mr_ext_memseg_en);
+ lkey = mlx5_mr_create(cdev, share_cache, entry, addr);
/*
* Update the local cache if successfully created a new global MR. Even
* if failed to create one, there's no action to take in this datapath
* misses, search in the global MR cache table and update the new entry to
* per-queue local caches.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Pointer to per-queue MR control structure.
* @param addr
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
-uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- uintptr_t addr, unsigned int mr_ext_memseg_en)
+static uint32_t
+mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
uint32_t lkey;
uint16_t bh_idx = 0;
* and local cache_bh[] will be updated inside if possible.
* Top-half cache entry will also be updated.
*/
- lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
- repl, addr, mr_ext_memseg_en);
+ lkey = mr_lookup_caches(mr_ctrl, repl, addr);
if (unlikely(lkey == UINT32_MAX))
return UINT32_MAX;
}
}
/**
- * Release all the created MRs and resources on global MR cache of a device.
+ * Release all the created MRs and resources on global MR cache of a device
* list.
*
* @param share_cache
mlx5_mr_garbage_collect(share_cache);
}
+/**
+ * Initialize global MR cache of a device.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
+{
+ /* Set the reg_mr and dereg_mr callback functions */
+ mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
+ &share_cache->dereg_mr_cb);
+ rte_rwlock_init(&share_cache->rwlock);
+ rte_rwlock_init(&share_cache->mprwlock);
+ share_cache->mp_cb_registered = 0;
+ /* Initialize B-tree and allocate memory for global MR cache table. */
+ return mlx5_mr_btree_init(&share_cache->cache,
+ MLX5_MR_BTREE_CACHE_N * 2, socket);
+}
+
/**
* Flush all of the local cache entries.
*
return mr;
}
+/**
+ * Callback for a memory free event. Iterate over the freed memsegs and check
+ * whether each belongs to an existing MR. If so, clear the corresponding bit
+ * in the MR bitmap, leaving the MR fragmented. Once an MR becomes empty, it
+ * will be freed later by mlx5_mr_garbage_collect(). Even if this callback is
+ * called from a secondary process, the garbage collector runs in the primary
+ * process, as a secondary process cannot call mlx5_mr_create().
+ *
+ * The global cache must be rebuilt if there's any change and this event has to
+ * be propagated to dataplane threads to flush the local caches.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param ibdev_name
+ * Name of ibv device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+void
+mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
+ const char *ibdev_name, const void *addr, size_t len)
+{
+ const struct rte_memseg_list *msl;
+ struct mlx5_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
+ ibdev_name, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ MLX5_ASSERT((uintptr_t)addr ==
+ RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mr_cache_entry entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mlx5_mr_lookup_list(share_cache, &entry, start);
+ if (mr == NULL)
+ continue;
+ MLX5_ASSERT(mr->msl); /* Can't be external memory. */
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ MLX5_ASSERT(ms != NULL);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+ MLX5_ASSERT(pos < mr->ms_bmp_n);
+ DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
+ ibdev_name, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
+ DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
+ ibdev_name, (void *)mr);
+ }
+ /*
+ * MR is fragmented or will be freed. The global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
+ }
+ if (rebuild) {
+ mlx5_mr_rebuild_cache(share_cache);
+ /*
+ * No explicit write barrier is needed after updating dev_gen:
+ * the store-release semantics of the unlock below provide the
+ * required ordering at the software-visible level.
+ */
+ ++share_cache->dev_gen;
+ DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
+ share_cache->dev_gen);
+ }
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+}
+
/**
* Dump all the created MRs and the global cache entries.
*
- * @param sh
- * Pointer to Ethernet device shared context.
+ * @param share_cache
+ * Pointer to a global shared MR cache.
*/
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}
+
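+/** qsort() comparator ordering mlx5_range entries by start address. */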
+static int
+mlx5_range_compare_start(const void *lhs, const void *rhs)
+{
+ const struct mlx5_range *r1 = lhs, *r2 = rhs;
+
+ if (r1->start > r2->start)
+ return 1;
+ else if (r1->start < r2->start)
+ return -1;
+ return 0;
+}
+
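+/**
+ * rte_mempool_mem_iter() callback that records a mempool memory chunk
+ * as a page-aligned virtual address range.
+ */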
+static void
+mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int idx)
+{
+ struct mlx5_range *ranges = opaque, *range = &ranges[idx];
+ uintptr_t start = (uintptr_t)memhdr->addr;
+ uint64_t page_size = rte_mem_page_size();
+
+ RTE_SET_USED(mp);
+ range->start = RTE_ALIGN_FLOOR(start, page_size);
+ range->end = RTE_ALIGN_CEIL(start + memhdr->len, page_size);
+}
+
+/**
+ * Collect page-aligned memory ranges of the mempool.
+ */
+static int
+mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out,
+ unsigned int *out_n)
+{
+ unsigned int n;
+
+ DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name);
+ n = mp->nb_mem_chunks;
+ *out = calloc(n, sizeof(**out));
+ if (*out == NULL)
+ return -1;
+ rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out);
+ *out_n = n;
+ return 0;
+}
+
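+/** State shared by mlx5_mempool_get_extmem() and its per-object callback. */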
+struct mlx5_mempool_get_extmem_data {
+ struct mlx5_range *heap; /**< Sorted array of visited pages. */
+ unsigned int heap_size; /**< Number of entries in the heap. */
+ int ret; /**< Set to -1 on allocation failure. */
+};
+
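+/**
+ * rte_mempool_obj_iter() callback: remember the page holding the mbuf data
+ * buffer unless that page has already been visited.
+ */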
+static void
+mlx5_mempool_get_extmem_cb(struct rte_mempool *mp, void *opaque,
+ void *obj, unsigned int obj_idx)
+{
+ struct mlx5_mempool_get_extmem_data *data = opaque;
+ struct rte_mbuf *mbuf = obj;
+ uintptr_t addr = (uintptr_t)mbuf->buf_addr;
+ struct mlx5_range *seg, *heap;
+ struct rte_memseg_list *msl;
+ size_t page_size;
+ uintptr_t page_start;
+ unsigned int pos = 0, len = data->heap_size, delta;
+
+ RTE_SET_USED(mp);
+ RTE_SET_USED(obj_idx);
+ if (data->ret < 0)
+ return;
+ /* Binary search for an already visited page. */
+ while (len > 1) {
+ delta = len / 2;
+ if (addr < data->heap[pos + delta].start) {
+ len = delta;
+ } else {
+ pos += delta;
+ len -= delta;
+ }
+ }
+ if (data->heap != NULL) {
+ seg = &data->heap[pos];
+ if (seg->start <= addr && addr < seg->end)
+ return;
+ }
+ /* Determine the page boundaries and remember them. */
+ heap = realloc(data->heap, sizeof(heap[0]) * (data->heap_size + 1));
+ if (heap == NULL) {
+ free(data->heap);
+ data->heap = NULL;
+ data->ret = -1;
+ return;
+ }
+ data->heap = heap;
+ data->heap_size++;
+ seg = &heap[data->heap_size - 1];
+ msl = rte_mem_virt2memseg_list((void *)addr);
+ page_size = msl != NULL ? msl->page_sz : rte_mem_page_size();
+ page_start = RTE_PTR_ALIGN_FLOOR(addr, page_size);
+ seg->start = page_start;
+ seg->end = page_start + page_size;
+ /* Maintain the heap order. */
+ qsort(data->heap, data->heap_size, sizeof(heap[0]),
+ mlx5_range_compare_start);
+}
+
+/**
+ * Recover, as closely as possible, the pages of external memory used by
+ * a mempool created with RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF.
+ * Pages are kept in a sorted heap for efficient search, since mbufs are many.
+ */
+static int
+mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
+ unsigned int *out_n)
+{
+ struct mlx5_mempool_get_extmem_data data;
+
+ DRV_LOG(DEBUG, "Recovering external pinned pages of mempool %s",
+ mp->name);
+ memset(&data, 0, sizeof(data));
+ rte_mempool_obj_iter(mp, mlx5_mempool_get_extmem_cb, &data);
+ *out = data.heap;
+ *out_n = data.heap_size;
+ return data.ret;
+}
+
+/**
+ * Get VA-contiguous ranges of the mempool memory.
+ * Each range start and end is aligned to the system page size.
+ *
+ * @param[in] mp
+ * Analyzed mempool.
+ * @param[in] is_extmem
+ * Whether the pool contains only external pinned buffers.
+ * @param[out] out
+ * Receives the ranges; the caller must release them with free().
+ * @param[out] out_n
+ * Receives the number of @p out elements.
+ *
+ * @return
+ * 0 on success, (-1) on failure.
+ */
+static int
+mlx5_get_mempool_ranges(struct rte_mempool *mp, bool is_extmem,
+ struct mlx5_range **out, unsigned int *out_n)
+{
+ struct mlx5_range *chunks;
+ unsigned int chunks_n, contig_n, i;
+ int ret;
+
+ /* Collect the pool underlying memory. */
+ ret = is_extmem ? mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
+ mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
+ if (ret < 0)
+ return ret;
+ /* Merge adjacent chunks and place them at the beginning. */
+ qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
+ contig_n = 1;
+ for (i = 1; i < chunks_n; i++)
+ if (chunks[i - 1].end != chunks[i].start) {
+ chunks[contig_n - 1].end = chunks[i - 1].end;
+ chunks[contig_n] = chunks[i];
+ contig_n++;
+ }
+ /* Extend the last contiguous chunk to the end of the mempool. */
+ chunks[contig_n - 1].end = chunks[i - 1].end;
+ *out = chunks;
+ *out_n = contig_n;
+ return 0;
+}
+
+/**
+ * Analyze mempool memory to select memory ranges to register.
+ *
+ * @param[in] mp
+ * Mempool to analyze.
+ * @param[in] is_extmem
+ * Whether the pool contains only external pinned buffers.
+ * @param[out] out
+ * Receives memory ranges to register, aligned to the system page size.
+ * The caller must release them with free().
+ * @param[out] out_n
+ * Receives the number of @p out items.
+ * @param[out] share_hugepage
+ * Receives True if the entire pool resides within a single hugepage.
+ *
+ * @return
+ * 0 on success, (-1) on failure.
+ */
+static int
+mlx5_mempool_reg_analyze(struct rte_mempool *mp, bool is_extmem,
+ struct mlx5_range **out, unsigned int *out_n,
+ bool *share_hugepage)
+{
+ struct mlx5_range *ranges = NULL;
+ unsigned int i, ranges_n = 0;
+ struct rte_memseg_list *msl;
+
+ if (mlx5_get_mempool_ranges(mp, is_extmem, &ranges, &ranges_n) < 0) {
+ DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
+ mp->name);
+ return -1;
+ }
+ /* Check if the hugepage of the pool can be shared. */
+ *share_hugepage = false;
+ msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
+ if (msl != NULL) {
+ uint64_t hugepage_sz = 0;
+
+ /* Check that all ranges are on pages of the same size. */
+ for (i = 0; i < ranges_n; i++) {
+ if (hugepage_sz != 0 && hugepage_sz != msl->page_sz)
+ break;
+ hugepage_sz = msl->page_sz;
+ }
+ if (i == ranges_n) {
+ /*
+ * If the entire pool is within one hugepage,
+ * combine all ranges into one of the hugepage size.
+ */
+ uintptr_t reg_start = ranges[0].start;
+ uintptr_t reg_end = ranges[ranges_n - 1].end;
+ uintptr_t hugepage_start =
+ RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
+ uintptr_t hugepage_end = hugepage_start + hugepage_sz;
+ if (reg_end < hugepage_end) {
+ ranges[0].start = hugepage_start;
+ ranges[0].end = hugepage_end;
+ ranges_n = 1;
+ *share_hugepage = true;
+ }
+ }
+ }
+ *out = ranges;
+ *out_n = ranges_n;
+ return 0;
+}
+
+/** Create a registration object for the mempool. */
+static struct mlx5_mempool_reg *
+mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
+ bool is_extmem)
+{
+ struct mlx5_mempool_reg *mpr = NULL;
+
+ mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(struct mlx5_mempool_reg),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (mpr == NULL) {
+ DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
+ mp->name);
+ return NULL;
+ }
+ mpr->mrs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ mrs_n * sizeof(struct mlx5_mempool_mr),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!mpr->mrs) {
+ DRV_LOG(ERR, "Cannot allocate mempool %s registration MRs",
+ mp->name);
+ mlx5_free(mpr);
+ return NULL;
+ }
+ mpr->mp = mp;
+ mpr->mrs_n = mrs_n;
+ mpr->is_extmem = is_extmem;
+ return mpr;
+}
+
+/**
+ * Destroy a mempool registration object.
+ *
+ * @param standalone
+ * Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
+ */
+static void
+mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mempool_reg *mpr, bool standalone)
+{
+ if (standalone) {
+ unsigned int i;
+
+ for (i = 0; i < mpr->mrs_n; i++)
+ share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
+ mlx5_free(mpr->mrs);
+ }
+ mlx5_free(mpr);
+}
+
+/** Find registration object of a mempool. */
+static struct mlx5_mempool_reg *
+mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mempool_reg *mpr;
+
+ LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
+ if (mpr->mp == mp)
+ break;
+ return mpr;
+}
+
+/** Increment reference counters of MRs used in the registration. */
+static void
+mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpr->mrs_n; i++)
+ __atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+}
+
+/**
+ * Decrement reference counters of MRs used in the registration.
+ *
+ * @return True if no more references to @p mpr MRs exist, False otherwise.
+ */
+static bool
+mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
+{
+ unsigned int i;
+ bool ret = false;
+
+ for (i = 0; i < mpr->mrs_n; i++)
+ ret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,
+ __ATOMIC_RELAXED) == 0;
+ return ret;
+}
+
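+/**
+ * Register mempool memory in the protection domain from the primary process:
+ * create an MR for every VA-contiguous range of the mempool, or share an
+ * already registered hugepage MR when the entire pool fits into one hugepage,
+ * and link the registration object into the shared cache list.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */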
+static int
+mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
+ void *pd, struct rte_mempool *mp,
+ bool is_extmem)
+{
+ struct mlx5_range *ranges = NULL;
+ struct mlx5_mempool_reg *mpr, *old_mpr, *new_mpr;
+ unsigned int i, ranges_n;
+ bool share_hugepage, standalone = false;
+ int ret = -1;
+
+ /* Early check to avoid unnecessary creation of MRs. */
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ old_mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ if (old_mpr != NULL && (!is_extmem || old_mpr->is_extmem)) {
+ DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
+ mp->name, pd);
+ rte_errno = EEXIST;
+ goto exit;
+ }
+ if (mlx5_mempool_reg_analyze(mp, is_extmem, &ranges, &ranges_n,
+ &share_hugepage) < 0) {
+ DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
+ rte_errno = ENOMEM;
+ goto exit;
+ }
+ new_mpr = mlx5_mempool_reg_create(mp, ranges_n, is_extmem);
+ if (new_mpr == NULL) {
+ DRV_LOG(ERR,
+ "Cannot create a registration object for mempool %s in PD %p",
+ mp->name, pd);
+ rte_errno = ENOMEM;
+ goto exit;
+ }
+ /*
+ * If the entire mempool fits in a single hugepage, the MR for this
+ * hugepage can be shared across mempools that also fit in it.
+ */
+ if (share_hugepage) {
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
+ if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
+ break;
+ }
+ if (mpr != NULL) {
+ new_mpr->mrs = mpr->mrs;
+ mlx5_mempool_reg_attach(new_mpr);
+ LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
+ new_mpr, next);
+ }
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ if (mpr != NULL) {
+ DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
+ mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
+ mpr->mp->name);
+ ret = 0;
+ goto exit;
+ }
+ }
+ for (i = 0; i < ranges_n; i++) {
+ struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
+ const struct mlx5_range *range = &ranges[i];
+ size_t len = range->end - range->start;
+
+ if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
+ &mr->pmd_mr) < 0) {
+ DRV_LOG(ERR,
+ "Failed to create an MR in PD %p for address range "
+ "[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
+ pd, range->start, range->end, len, mp->name);
+ break;
+ }
+ DRV_LOG(DEBUG,
+ "Created a new MR %#x in PD %p for address range "
+ "[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
+ mr->pmd_mr.lkey, pd, range->start, range->end, len,
+ mp->name);
+ }
+ if (i != ranges_n) {
+ mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
+ rte_errno = EINVAL;
+ goto exit;
+ }
+ /* Concurrent registration is not supposed to happen. */
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ if (mpr == old_mpr && old_mpr != NULL) {
+ LIST_REMOVE(old_mpr, next);
+ standalone = mlx5_mempool_reg_detach(mpr);
+ /* No need to flush the cache: old MRs cannot be in use. */
+ mpr = NULL;
+ }
+ if (mpr == NULL) {
+ mlx5_mempool_reg_attach(new_mpr);
+ LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
+ ret = 0;
+ }
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ if (mpr != NULL) {
+ DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
+ mp->name, pd);
+ mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
+ rte_errno = EEXIST;
+ goto exit;
+ } else if (old_mpr != NULL) {
+ DRV_LOG(DEBUG, "Mempool %s registration for PD %p updated for external memory",
+ mp->name, pd);
+ mlx5_mempool_reg_destroy(share_cache, old_mpr, standalone);
+ }
+exit:
+ free(ranges);
+ return ret;
+}
+
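+/**
+ * In a secondary process, request the primary process to register the mempool
+ * through the multi-process channel.
+ */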
+static int
+mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp, bool is_extmem)
+{
+ return mlx5_mp_req_mempool_reg(cdev, mp, true, is_extmem);
+}
+
+/**
+ * Register the memory of a mempool in the protection domain.
+ *
+ * @param cdev
+ * Pointer to the mlx5 common device.
+ * @param mp
+ * Mempool to register.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp, bool is_extmem)
+{
+ if (mp->flags & RTE_MEMPOOL_F_NON_IO)
+ return 0;
+ switch (rte_eal_process_type()) {
+ case RTE_PROC_PRIMARY:
+ return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
+ cdev->pd, mp,
+ is_extmem);
+ case RTE_PROC_SECONDARY:
+ return mlx5_mr_mempool_register_secondary(cdev, mp, is_extmem);
+ default:
+ return -1;
+ }
+}
+
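+/**
+ * Unregister mempool memory in the primary process: detach the MRs of the
+ * registration, bump the cache generation when the MRs are about to be
+ * destroyed so that per-queue caches get flushed, and release the
+ * registration object.
+ */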
+static int
+mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mempool_reg *mpr;
+ bool standalone = false;
+
+ rte_rwlock_write_lock(&share_cache->rwlock);
+ LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
+ if (mpr->mp == mp) {
+ LIST_REMOVE(mpr, next);
+ standalone = mlx5_mempool_reg_detach(mpr);
+ if (standalone)
+ /*
+ * The unlock operation below provides a memory
+ * barrier due to its store-release semantics.
+ */
+ ++share_cache->dev_gen;
+ break;
+ }
+ rte_rwlock_write_unlock(&share_cache->rwlock);
+ if (mpr == NULL) {
+ rte_errno = ENOENT;
+ return -1;
+ }
+ mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
+ return 0;
+}
+
+static int
+mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
+{
+ return mlx5_mp_req_mempool_reg(cdev, mp, false, false /* is_extmem */);
+}
+
+/**
+ * Unregister the memory of a mempool from the protection domain.
+ *
+ * @param cdev
+ * Pointer to the mlx5 common device.
+ * @param mp
+ * Mempool to unregister.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
+{
+ if (mp->flags & RTE_MEMPOOL_F_NON_IO)
+ return 0;
+ switch (rte_eal_process_type()) {
+ case RTE_PROC_PRIMARY:
+ return mlx5_mr_mempool_unregister_primary(&cdev->mr_scache, mp);
+ case RTE_PROC_SECONDARY:
+ return mlx5_mr_mempool_unregister_secondary(cdev, mp);
+ default:
+ return -1;
+ }
+}
+
+/**
+ * Look up an MR key by address in a registered mempool.
+ *
+ * @param mpr
+ * Mempool registration object.
+ * @param addr
+ * Address within the mempool.
+ * @param entry
+ * Bottom-half cache entry to fill.
+ *
+ * @return
+ * MR key or UINT32_MAX on failure, which can only happen
+ * if the address is not from within the mempool.
+ */
+static uint32_t
+mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
+ struct mr_cache_entry *entry)
+{
+ uint32_t lkey = UINT32_MAX;
+ unsigned int i;
+
+ for (i = 0; i < mpr->mrs_n; i++) {
+ const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
+ uintptr_t mr_start = (uintptr_t)mr->addr;
+ uintptr_t mr_end = mr_start + mr->len;
+
+ if (mr_start <= addr && addr < mr_end) {
+ lkey = rte_cpu_to_be_32(mr->lkey);
+ entry->start = mr_start;
+ entry->end = mr_end;
+ entry->lkey = lkey;
+ break;
+ }
+ }
+ return lkey;
+}
+
+/**
+ * Update bottom-half cache from the list of mempool registrations.
+ *
+ * @param mr_ctrl
+ * Per-queue MR control handle.
+ * @param entry
+ * Pointer to an entry in the bottom-half cache to update
+ * with the MR lkey looked up.
+ * @param mp
+ * Mempool containing the address.
+ * @param addr
+ * Address to lookup.
+ * @return
+ * MR lkey on success, UINT32_MAX on failure.
+ */
+static uint32_t
+mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
+ struct mr_cache_entry *entry,
+ struct rte_mempool *mp, uintptr_t addr)
+{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ struct mlx5_mempool_reg *mpr;
+ uint32_t lkey = UINT32_MAX;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in mempool registrations. */
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ if (mpr != NULL)
+ lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ /*
+ * Update local cache. Even if it fails, return the found entry
+ * to update top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
+
+/**
+ * Populate cache with LKeys of all MRs used by the mempool.
+ * It is intended to be used to register Rx mempools in advance.
+ *
+ * @param mr_ctrl
+ * Per-queue MR control handle.
+ * @param mp
+ * Registered memory pool.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ struct mlx5_mempool_reg *mpr;
+ unsigned int i;
+
+ /*
+ * The registration stays valid after the lock is released,
+ * because this function is called only after the mempool is registered.
+ */
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ if (mpr == NULL) {
+ DRV_LOG(ERR, "Mempool %s is not registered", mp->name);
+ rte_errno = ENOENT;
+ return -1;
+ }
+ for (i = 0; i < mpr->mrs_n; i++) {
+ struct mlx5_mempool_mr *mr = &mpr->mrs[i];
+ struct mr_cache_entry entry;
+ uint32_t lkey;
+ uint16_t idx;
+
+ lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
+ if (lkey != UINT32_MAX)
+ continue;
+ if (bt->len == bt->size)
+ mr_btree_expand(bt, bt->size << 1);
+ entry.start = (uintptr_t)mr->pmd_mr.addr;
+ entry.end = entry.start + mr->pmd_mr.len;
+ entry.lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
+ if (mr_btree_insert(bt, &entry) < 0) {
+ DRV_LOG(ERR, "Cannot insert cache entry for mempool %s MR %08x",
+ mp->name, entry.lkey);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Bottom-half lookup for the address from the mempool.
+ *
+ * @param mr_ctrl
+ * Per-queue MR control handle.
+ * @param mp
+ * Mempool containing the address.
+ * @param addr
+ * Address to lookup.
+ * @return
+ * MR lkey on success, UINT32_MAX on failure.
+ */
+uint32_t
+mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp, uintptr_t addr)
+{
+ struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ lkey = mlx5_lookup_mempool_regs(mr_ctrl, repl, mp, addr);
+ /* Can only fail if the address is not from the mempool. */
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
+}
+
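+/**
+ * Bottom-half LKey search for an mbuf: resolve the mempool owning the data
+ * buffer (handling MPRQ buffers and pinned external memory), search the
+ * mempool registrations, and fall back to the address-based lookup for other
+ * external buffers.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure.
+ */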
+uint32_t
+mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
+{
+ struct rte_mempool *mp;
+ struct mlx5_mprq_buf *buf;
+ uint32_t lkey;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_common_device *cdev =
+ container_of(share_cache, struct mlx5_common_device, mr_scache);
+ bool external, mprq, pinned = false;
+
+ /* Recover MPRQ mempool. */
+ external = RTE_MBUF_HAS_EXTBUF(mb);
+ if (external && mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+ mprq = true;
+ buf = mb->shinfo->fcb_opaque;
+ mp = buf->mp;
+ } else {
+ mprq = false;
+ mp = mlx5_mb2mp(mb);
+ pinned = rte_pktmbuf_priv_flags(mp) &
+ RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
+ }
+ if (!external || mprq || pinned) {
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
+ if (lkey != UINT32_MAX)
+ return lkey;
+ /* MPRQ is always registered. */
+ MLX5_ASSERT(!mprq);
+ }
+ /* Register pinned external memory if the mempool is not used for Rx. */
+ if (cdev->config.mr_mempool_reg_en && pinned) {
+ if (mlx5_mr_mempool_register(cdev, mp, true) < 0)
+ return UINT32_MAX;
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
+ MLX5_ASSERT(lkey != UINT32_MAX);
+ return lkey;
+ }
+ /* Fallback to generic mechanism in corner cases. */
+ return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
+}