1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
7 #include <rte_eal_memconfig.h>
8 #include <rte_eal_paging.h>
10 #include <rte_mempool.h>
11 #include <rte_malloc.h>
12 #include <rte_rwlock.h>
14 #include "mlx5_glue.h"
15 #include "mlx5_common.h"
16 #include "mlx5_common_mp.h"
17 #include "mlx5_common_mr.h"
18 #include "mlx5_common_os.h"
19 #include "mlx5_common_log.h"
20 #include "mlx5_malloc.h"
22 struct mr_find_contig_memsegs_data {
26 const struct rte_memseg_list *msl;
29 /* Virtual memory range. */
35 /** Memory region for a mempool. */
36 struct mlx5_mempool_mr {
37 struct mlx5_pmd_mr pmd_mr;
38 uint32_t refcnt; /**< Number of mempools sharing this MR. */
41 /* Mempool registration. */
42 struct mlx5_mempool_reg {
43 LIST_ENTRY(mlx5_mempool_reg) next;
44 /** Registered mempool, used to designate registrations. */
45 struct rte_mempool *mp;
46 /** Memory regions for the address ranges of the mempool. */
47 struct mlx5_mempool_mr *mrs;
48 /** Number of memory regions. */
53 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
55 struct mlx5_mprq_buf *buf = opaque;
57 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
58 rte_mempool_put(buf->mp, buf);
59 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
60 __ATOMIC_RELAXED) == 0)) {
61 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
62 rte_mempool_put(buf->mp, buf);
67 * Expand the B-tree table to a given size. Must not be called while holding
68 * memory_hotplug_lock or share_cache.rwlock, because rte_realloc() is used.
71 * Pointer to B-tree structure.
73 * Number of entries for expansion.
76 * 0 on success, -1 on failure.
79 mr_btree_expand(struct mlx5_mr_btree *bt, int n)
87 * The downside of using rte_realloc() directly is that SOCKET_ID_ANY is
88 * used internally if there is no room to expand on the original socket.
89 * Because this is a quite rare case on a very slow path, it is acceptable.
90 * Initially cache_bh[] is given enough space in practice, and once it has
91 * been expanded, further expansion should almost never be needed again.
93 mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
94 n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
96 /* Not an error, B-tree search will be skipped. */
97 DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
101 DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
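/*
 * Usage note (a minimal sketch, not a literal excerpt from a caller): the
 * bottom-half caches grow by doubling when they fill up, as done later in
 * mr_lookup_caches() and mlx5_lookup_mempool_regs(). A hypothetical caller
 * would look like:
 *
 *   if (unlikely(bt->len == bt->size))
 *       mr_btree_expand(bt, bt->size << 1);
 *
 * Expansion failure only logs a warning; insertion into a full table is then
 * skipped and lookups fall back to the slower paths.
 */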
109 * Look up the LKey of an address in the given B-tree lookup table and store the last searched index.
113 * Pointer to B-tree structure.
115 * Pointer to the index. Even on a search failure, the index where the
116 * search stopped is returned, so it can be used when inserting a new entry.
121 * Searched LKey on success, UINT32_MAX on no match.
124 mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
126 struct mr_cache_entry *lkp_tbl;
130 MLX5_ASSERT(bt != NULL);
131 lkp_tbl = *bt->table;
133 /* First entry must be NULL for comparison. */
134 MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
135 lkp_tbl[0].lkey == UINT32_MAX));
138 register uint16_t delta = n >> 1;
140 if (addr < lkp_tbl[base + delta].start) {
147 MLX5_ASSERT(addr >= lkp_tbl[base].start);
149 if (addr < lkp_tbl[base].end)
150 return lkp_tbl[base].lkey;
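/*
 * Illustration (assumed layout, for clarity only): the lookup table is kept
 * sorted by start address with a sentinel first entry, so the binary search
 * above always has a lower bound to compare against, e.g.:
 *
 *   idx   start          end            lkey
 *   [0]   0x0            0x0            UINT32_MAX   (sentinel, never matches)
 *   [1]   0x100000000    0x100200000    0x1234
 *   [2]   0x7f0000000    0x7f0400000    0x5678
 *
 * Looking up 0x100001000 returns lkey 0x1234 with *idx == 1; looking up an
 * unmapped address returns UINT32_MAX, and *idx is left at the last compared
 * entry so that mr_btree_insert() can derive the insertion slot from it.
 */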
156 * Insert an entry to B-tree lookup table.
159 * Pointer to B-tree structure.
161 * Pointer to new entry to insert.
164 * 0 on success, -1 on failure.
167 mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
169 struct mr_cache_entry *lkp_tbl;
173 MLX5_ASSERT(bt != NULL);
174 MLX5_ASSERT(bt->len <= bt->size);
175 MLX5_ASSERT(bt->len > 0);
176 lkp_tbl = *bt->table;
177 /* Find out the slot for insertion. */
178 if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
180 "abort insertion to B-tree(%p): already exists at"
181 " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
182 (void *)bt, idx, entry->start, entry->end, entry->lkey);
183 /* Already exists, return. */
186 /* If table is full, return error. */
187 if (unlikely(bt->len == bt->size)) {
193 shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
195 memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
196 lkp_tbl[idx] = *entry;
199 "inserted B-tree(%p)[%u],"
200 " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
201 (void *)bt, idx, entry->start, entry->end, entry->lkey);
206 * Initialize B-tree and allocate memory for lookup table.
209 * Pointer to B-tree structure.
211 * Number of entries to allocate.
213 * NUMA socket on which memory must be allocated.
216 * 0 on success, a negative errno value otherwise and rte_errno is set.
219 mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
225 MLX5_ASSERT(!bt->table && !bt->size);
226 memset(bt, 0, sizeof(*bt));
227 bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
228 sizeof(struct mr_cache_entry) * n,
230 if (bt->table == NULL) {
233 "failed to allocate memory for btree cache on socket "
238 /* First entry must be NULL for binary search. */
239 (*bt->table)[bt->len++] = (struct mr_cache_entry) {
242 DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
243 (void *)bt, (void *)bt->table);
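/*
 * A minimal usage sketch (hypothetical caller, error handling elided); the
 * structure must be zero-initialized before the first init:
 *
 *   struct mlx5_mr_btree bt = { 0 };
 *
 *   if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N, SOCKET_ID_ANY) != 0)
 *       return -rte_errno;
 *   ...
 *   mlx5_mr_btree_dump(&bt);  (a no-op unless RTE_LIBRTE_MLX5_DEBUG is defined)
 *   mlx5_mr_btree_free(&bt);
 */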
248 * Free B-tree resources.
251 * Pointer to B-tree structure.
254 mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
258 DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
259 (void *)bt, (void *)bt->table);
260 mlx5_free(bt->table);
261 memset(bt, 0, sizeof(*bt));
265 * Dump all the entries in a B-tree
268 * Pointer to B-tree structure.
271 mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
273 #ifdef RTE_LIBRTE_MLX5_DEBUG
275 struct mr_cache_entry *lkp_tbl;
279 lkp_tbl = *bt->table;
280 for (idx = 0; idx < bt->len; ++idx) {
281 struct mr_cache_entry *entry = &lkp_tbl[idx];
283 DRV_LOG(DEBUG, "B-tree(%p)[%u],"
284 " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
285 (void *)bt, idx, entry->start, entry->end, entry->lkey);
291 * Initialize per-queue MR control descriptor.
294 * Pointer to MR control structure.
296 * Pointer to generation number of global cache.
298 * NUMA socket on which memory must be allocated.
301 * 0 on success, a negative errno value otherwise and rte_errno is set.
304 mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
307 if (mr_ctrl == NULL) {
311 /* Save pointer of global generation number to check memory event. */
312 mr_ctrl->dev_gen_ptr = dev_gen_ptr;
313 /* Initialize B-tree and allocate memory for bottom-half cache table. */
314 return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
319 * Find virtually contiguous memory chunk in a given MR.
322 * Pointer to MR structure.
324 * Pointer to the returning MR cache entry. If not found, this will not be updated.
327 * Start index of the memseg bitmap.
330 * Next index to go on lookup.
333 mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
340 /* MR for external memory doesn't have memseg list. */
341 if (mr->msl == NULL) {
342 MLX5_ASSERT(mr->ms_bmp_n == 1);
343 MLX5_ASSERT(mr->ms_n == 1);
344 MLX5_ASSERT(base_idx == 0);
346 * Can't search the memseg list, but take the chunk directly from
347 * pmd_mr since there is only one chunk.
349 entry->start = (uintptr_t)mr->pmd_mr.addr;
350 entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
351 entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
352 /* Returning 1 ends iteration. */
355 for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
356 if (rte_bitmap_get(mr->ms_bmp, idx)) {
357 const struct rte_memseg_list *msl;
358 const struct rte_memseg *ms;
361 ms = rte_fbarray_get(&msl->memseg_arr,
362 mr->ms_base_idx + idx);
363 MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
366 end = ms->addr_64 + ms->hugepage_sz;
368 /* Passed the end of a fragment. */
373 /* Found one chunk. */
374 entry->start = start;
376 entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
382 * Insert an MR into the global B-tree cache. It may fail when memory is low;
383 * in that case, the entry will have to be found by mlx5_mr_lookup_list() in
384 * mlx5_mr_create() on a cache miss.
387 * Pointer to a global shared MR cache.
389 * Pointer to MR to insert.
392 * 0 on success, -1 on failure.
395 mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
400 DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
401 (void *)mr, (void *)share_cache);
402 for (n = 0; n < mr->ms_bmp_n; ) {
403 struct mr_cache_entry entry;
405 memset(&entry, 0, sizeof(entry));
406 /* Find a contiguous chunk and advance the index. */
407 n = mr_find_next_chunk(mr, &entry, n);
410 if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
412 * Overflowed, but the global table cannot be expanded
413 * here because that could cause a deadlock.
422 * Look up address in the original global MR list.
425 * Pointer to a global shared MR cache.
427 * Pointer to returning MR cache entry. If no match, this will not be updated.
432 * Found MR on match, NULL otherwise.
435 mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
436 struct mr_cache_entry *entry, uintptr_t addr)
440 /* Iterate all the existing MRs. */
441 LIST_FOREACH(mr, &share_cache->mr_list, mr) {
446 for (n = 0; n < mr->ms_bmp_n; ) {
447 struct mr_cache_entry ret;
449 memset(&ret, 0, sizeof(ret));
450 n = mr_find_next_chunk(mr, &ret, n);
451 if (addr >= ret.start && addr < ret.end) {
462 * Look up address on global MR cache.
465 * Pointer to a global shared MR cache.
467 * Pointer to returning MR cache entry. If no match, this will not be updated.
472 * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
475 mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
476 struct mr_cache_entry *entry, uintptr_t addr)
479 uint32_t lkey = UINT32_MAX;
483 * If the global cache has overflowed because it failed to expand the
484 * B-tree table, it cannot contain all the existing MRs. In that case, the
485 * address has to be searched by traversing the original MR list instead,
486 * which is a very slow path. Otherwise, the global cache is all-inclusive.
488 if (!unlikely(share_cache->cache.overflow)) {
489 lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
490 if (lkey != UINT32_MAX)
491 *entry = (*share_cache->cache.table)[idx];
493 /* Falling back to the slowest path. */
494 mr = mlx5_mr_lookup_list(share_cache, entry, addr);
498 MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
504 * Free MR resources. The MR lock must not be held to avoid a deadlock: rte_free()
505 * can raise a memory free event and the callback function would spin on the lock.
508 * Pointer to MR to free.
511 mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
515 DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
516 dereg_mr_cb(&mr->pmd_mr);
517 if (mr->ms_bmp != NULL)
518 rte_bitmap_free(mr->ms_bmp);
523 mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
527 DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
528 /* Flush cache to rebuild. */
529 share_cache->cache.len = 1;
530 share_cache->cache.overflow = 0;
531 /* Iterate all the existing MRs. */
532 LIST_FOREACH(mr, &share_cache->mr_list, mr)
533 if (mlx5_mr_insert_cache(share_cache, mr) < 0)
538 * Release the resources of detached MRs that have no online entry.
541 * Pointer to a global shared MR cache.
544 mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
546 struct mlx5_mr *mr_next;
547 struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
549 /* Must be called from the primary process. */
550 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
552 * An MR can't be freed while holding the lock because rte_free() could call
553 * the memory free callback function, which would result in a deadlock.
555 rte_rwlock_write_lock(&share_cache->rwlock);
556 /* Detach the whole free list and release it after unlocking. */
557 free_list = share_cache->mr_free_list;
558 LIST_INIT(&share_cache->mr_free_list);
559 rte_rwlock_write_unlock(&share_cache->rwlock);
560 /* Release resources. */
561 mr_next = LIST_FIRST(&free_list);
562 while (mr_next != NULL) {
563 struct mlx5_mr *mr = mr_next;
565 mr_next = LIST_NEXT(mr, mr);
566 mlx5_mr_free(mr, share_cache->dereg_mr_cb);
570 /* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
572 mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
573 const struct rte_memseg *ms, size_t len, void *arg)
575 struct mr_find_contig_memsegs_data *data = arg;
577 if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
579 /* Found, save it and stop walking. */
580 data->start = ms->addr_64;
581 data->end = ms->addr_64 + len;
587 * Create a new global Memory Region (MR) for a missing virtual address.
588 * This API should be called by a secondary process; a request is then sent to
589 * the primary process in order to create an MR for the address. As the global MR
590 * list is in shared memory, the subsequent LKey lookup should succeed unless the request fails.
594 * Pointer to pd of a device (net, regex, vdpa,...).
596 * Multi-process identifier, may be NULL for the primary process.
598 * Pointer to a global shared MR cache.
600 * Pointer to returning MR cache entry, found in the global cache or newly
601 * created. If creation fails, this will not be updated.
603 * Target virtual address to register.
604 * @param mr_ext_memseg_en
605 * Configurable flag controlling whether MR registration is extended beyond a single memseg.
608 * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
611 mlx5_mr_create_secondary(void *pd __rte_unused,
612 struct mlx5_mp_id *mp_id,
613 struct mlx5_mr_share_cache *share_cache,
614 struct mr_cache_entry *entry, uintptr_t addr,
615 unsigned int mr_ext_memseg_en __rte_unused)
623 DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
624 mp_id->port_id, (void *)addr);
625 ret = mlx5_mp_req_mr_create(mp_id, addr);
627 DRV_LOG(DEBUG, "Failed to request MR creation for address (%p)",
631 rte_rwlock_read_lock(&share_cache->rwlock);
632 /* Fill in output data. */
633 mlx5_mr_lookup_cache(share_cache, entry, addr);
634 /* Lookup can't fail. */
635 MLX5_ASSERT(entry->lkey != UINT32_MAX);
636 rte_rwlock_read_unlock(&share_cache->rwlock);
637 DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
638 " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
639 (void *)addr, entry->start, entry->end, entry->lkey);
644 * Create a new global Memory Region (MR) for a missing virtual address.
645 * Register the entire virtually contiguous memory chunk around the address.
648 * Pointer to pd of a device (net, regex, vdpa,...).
650 * Pointer to a global shared MR cache.
652 * Pointer to returning MR cache entry, found in the global cache or newly
653 * created. If creation fails, this will not be updated.
655 * Target virtual address to register.
656 * @param mr_ext_memseg_en
657 * Configurable flag controlling whether MR registration is extended beyond a single memseg.
660 * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
663 mlx5_mr_create_primary(void *pd,
664 struct mlx5_mr_share_cache *share_cache,
665 struct mr_cache_entry *entry, uintptr_t addr,
666 unsigned int mr_ext_memseg_en)
668 struct mr_find_contig_memsegs_data data = {.addr = addr, };
669 struct mr_find_contig_memsegs_data data_re;
670 const struct rte_memseg_list *msl;
671 const struct rte_memseg *ms;
672 struct mlx5_mr *mr = NULL;
673 int ms_idx_shift = -1;
680 DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
682 * Release detached MRs, if any. This must not be called while holding either
683 * memory_hotplug_lock or share_cache->rwlock. MRs on the free list have
684 * been detached by the memory free event, but they could not be released
685 * inside the callback because that would deadlock. As a result, releasing
686 * resources is quite opportunistic.
688 mlx5_mr_garbage_collect(share_cache);
690 * If enabled, find the contiguous virtual address chunk in use to which
691 * the given address belongs, in order to register the maximum range.
692 * In the best case, where mempools are not dynamically recreated and
693 * '--socket-mem' is specified as an EAL option, it is very likely to
694 * have only one MR (LKey) per socket and per hugepage size, even if
695 * the system memory is highly fragmented. As the whole memory
696 * chunk will be pinned by the kernel, it can't be reused unless the entire
697 * chunk is freed from EAL.
699 * If disabled, just register one memseg (page). Memory consumption
700 * will then be minimized, but performance may drop if there
701 * are many MRs to look up on the datapath.
703 if (!mr_ext_memseg_en) {
704 data.msl = rte_mem_virt2memseg_list((void *)addr);
705 data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
706 data.end = data.start + data.msl->page_sz;
707 } else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
709 "Unable to find virtually contiguous"
710 " chunk for address (%p)."
711 " rte_memseg_contig_walk() failed.", (void *)addr);
716 /* Addresses must be page-aligned. */
717 MLX5_ASSERT(data.msl);
718 MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
719 MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
721 ms = rte_mem_virt2memseg((void *)data.start, msl);
722 len = data.end - data.start;
724 MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
725 /* Number of memsegs in the range. */
726 ms_n = len / msl->page_sz;
727 DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
728 " page_sz=0x%" PRIx64 ", ms_n=%u",
729 (void *)addr, data.start, data.end, msl->page_sz, ms_n);
730 /* Size of memory for bitmap. */
731 bmp_size = rte_bitmap_get_memory_footprint(ms_n);
732 mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
733 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
734 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
736 DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
737 " address (%p).", (void *)addr);
743 * Save the index of the first memseg and initialize the memseg bitmap. To
744 * see whether the memseg at ms_idx in the memseg list is still valid, check:
745 *   rte_bitmap_get(mr->ms_bmp, ms_idx - mr->ms_base_idx)
747 mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
748 bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
749 mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
750 if (mr->ms_bmp == NULL) {
751 DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
752 " address (%p).", (void *)addr);
757 * Recheck whether the extended contiguous chunk is still valid.
758 * Because memory_hotplug_lock can't be held if there are any memory-related
759 * calls in a critical path, the resource allocation above could not be done
760 * under the lock. If the memory has been changed at this point, try again
761 * with just a single page. If not, go on with the big chunk atomically from here.
764 rte_mcfg_mem_read_lock();
766 if (len > msl->page_sz &&
767 !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
769 "Unable to find virtually contiguous chunk for address "
770 "(%p). rte_memseg_contig_walk() failed.", (void *)addr);
774 if (data.start != data_re.start || data.end != data_re.end) {
776 * The extended contiguous chunk has been changed. Try again
777 * with a single memseg instead.
779 data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
780 data.end = data.start + msl->page_sz;
781 rte_mcfg_mem_read_unlock();
782 mlx5_mr_free(mr, share_cache->dereg_mr_cb);
783 goto alloc_resources;
785 MLX5_ASSERT(data.msl == data_re.msl);
786 rte_rwlock_write_lock(&share_cache->rwlock);
788 * Check that the address is really missing. If another thread has already
789 * created one, or it is not found due to overflow, abort and return.
791 if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
793 * Insert to the global cache table. It may fail due to
794 * low-on-memory. Then, this entry will have to be searched
797 mr_btree_insert(&share_cache->cache, entry);
798 DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
800 rte_rwlock_write_unlock(&share_cache->rwlock);
801 rte_mcfg_mem_read_unlock();
803 * Must be unlocked before calling rte_free() because
804 * mlx5_mr_mem_event_free_cb() can be called inside.
806 mlx5_mr_free(mr, share_cache->dereg_mr_cb);
810 * Trim start and end addresses for verbs MR. Set bits for registering
811 * memsegs but exclude already registered ones. Bitmap can be
814 for (n = 0; n < ms_n; ++n) {
816 struct mr_cache_entry ret;
818 memset(&ret, 0, sizeof(ret));
819 start = data_re.start + n * msl->page_sz;
820 /* Exclude memsegs already registered by other MRs. */
821 if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
824 * Start from the first unregistered memseg in the
827 if (ms_idx_shift == -1) {
828 mr->ms_base_idx += n;
832 data.end = start + msl->page_sz;
833 rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
837 len = data.end - data.start;
838 mr->ms_bmp_n = len / msl->page_sz;
839 MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
841 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
842 * be called while holding the memory lock because it doesn't use
843 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
844 * through mlx5_alloc_verbs_buf().
846 share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
847 if (mr->pmd_mr.obj == NULL) {
848 DRV_LOG(DEBUG, "Failed to create an MR for address (%p)",
853 MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
854 MLX5_ASSERT(mr->pmd_mr.len);
855 LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
856 DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
857 " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
858 " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
859 (void *)mr, (void *)addr, data.start, data.end,
860 rte_cpu_to_be_32(mr->pmd_mr.lkey),
861 mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
862 /* Insert to the global cache table. */
863 mlx5_mr_insert_cache(share_cache, mr);
864 /* Fill in output data. */
865 mlx5_mr_lookup_cache(share_cache, entry, addr);
866 /* Lookup can't fail. */
867 MLX5_ASSERT(entry->lkey != UINT32_MAX);
868 rte_rwlock_write_unlock(&share_cache->rwlock);
869 rte_mcfg_mem_read_unlock();
872 rte_rwlock_write_unlock(&share_cache->rwlock);
874 rte_mcfg_mem_read_unlock();
877 * In case of error, as this can be called on a datapath, a warning
878 * message per error is preferable to failing hard. Must be unlocked before
879 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
882 mlx5_mr_free(mr, share_cache->dereg_mr_cb);
887 * Create a new global Memory Region (MR) for a missing virtual address.
888 * This can be called from both the primary and secondary processes.
891 * Pointer to pd handle of a device (net, regex, vdpa,...).
893 * Multi-process identifier, may be NULL for the primary process.
895 * Pointer to a global shared MR cache.
897 * Pointer to returning MR cache entry, found in the global cache or newly
898 * created. If creation fails, this will not be updated.
900 * Target virtual address to register.
901 * @param mr_ext_memseg_en
902 * Configurable flag controlling whether MR registration is extended beyond a single memseg.
905 * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
908 mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
909 struct mlx5_mr_share_cache *share_cache,
910 struct mr_cache_entry *entry, uintptr_t addr,
911 unsigned int mr_ext_memseg_en)
915 switch (rte_eal_process_type()) {
916 case RTE_PROC_PRIMARY:
917 ret = mlx5_mr_create_primary(pd, share_cache, entry,
918 addr, mr_ext_memseg_en);
920 case RTE_PROC_SECONDARY:
921 ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
922 addr, mr_ext_memseg_en);
931 * Look up the address in the global MR cache table. If not found, create a new MR.
932 * Insert the found/created entry into the local bottom-half cache table.
935 * Pointer to pd of a device (net, regex, vdpa,...).
937 * Multi-process identifier, may be NULL for the primary process.
939 * Pointer to a global shared MR cache.
941 * Pointer to per-queue MR control structure.
943 * Pointer to returning MR cache entry, found in the global cache or newly
944 * created. If creation fails, this is not written.
947 * @param mr_ext_memseg_en
948 * Configurable flag controlling whether MR registration is extended beyond a single memseg.
951 * Searched LKey on success, UINT32_MAX on no match.
954 mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
955 struct mlx5_mr_share_cache *share_cache,
956 struct mlx5_mr_ctrl *mr_ctrl,
957 struct mr_cache_entry *entry, uintptr_t addr,
958 unsigned int mr_ext_memseg_en)
960 struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
964 /* If local cache table is full, try to double it. */
965 if (unlikely(bt->len == bt->size))
966 mr_btree_expand(bt, bt->size << 1);
967 /* Look up in the global cache. */
968 rte_rwlock_read_lock(&share_cache->rwlock);
969 lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
970 if (lkey != UINT32_MAX) {
972 *entry = (*share_cache->cache.table)[idx];
973 rte_rwlock_read_unlock(&share_cache->rwlock);
975 * Update local cache. Even if it fails, return the found entry
976 * to update top-half cache. Next time, this entry will be found
977 * in the global cache.
979 mr_btree_insert(bt, entry);
982 rte_rwlock_read_unlock(&share_cache->rwlock);
983 /* First time to see the address? Create a new MR. */
984 lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
987 * Update the local cache if a new global MR was successfully created. Even
988 * if creation failed, there is no action to take in this datapath
989 * code. As the returned LKey is invalid, this will eventually make the HW fail.
992 if (lkey != UINT32_MAX)
993 mr_btree_insert(bt, entry);
998 * Bottom-half of the LKey search on the datapath. Search in cache_bh[] first;
999 * on a miss, search the global MR cache table and propagate the new entry to
1000 * the per-queue local caches.
1003 * Pointer to pd of a device (net, regex, vdpa,...).
1005 * Multi-process identifier, may be NULL for the primary process.
1006 * @param share_cache
1007 * Pointer to a global shared MR cache.
1009 * Pointer to per-queue MR control structure.
1012 * @param mr_ext_memseg_en
1013 * Configurable flag controlling whether MR registration is extended beyond a single memseg.
1016 * Searched LKey on success, UINT32_MAX on no match.
1019 mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
1020 struct mlx5_mr_share_cache *share_cache,
1021 struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
1022 unsigned int mr_ext_memseg_en)
1025 uint16_t bh_idx = 0;
1026 /* Victim in top-half cache to replace with new entry. */
1027 struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
1029 /* Binary-search MR translation table. */
1030 lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
1031 /* Update top-half cache. */
1032 if (likely(lkey != UINT32_MAX)) {
1033 *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
1036 * If missed in local lookup table, search in the global cache
1037 * and local cache_bh[] will be updated inside if possible.
1038 * Top-half cache entry will also be updated.
1040 lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
1041 repl, addr, mr_ext_memseg_en);
1042 if (unlikely(lkey == UINT32_MAX))
1045 /* Update the most recently used entry. */
1046 mr_ctrl->mru = mr_ctrl->head;
1047 /* Point to the next victim, the oldest. */
1048 mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
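/*
 * Per-queue lookup structure (informational sketch, not a literal excerpt):
 * mr_ctrl->cache[] is a small linear array searched first on the datapath
 * (top half), with mr_ctrl->mru remembering the most recently used slot, and
 * mr_ctrl->cache_bh is the per-queue B-tree consulted here on a miss (bottom
 * half). A hypothetical fast-path caller of this helper would do:
 *
 *   lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
 *                              MLX5_MR_CACHE_N, addr);
 *   if (unlikely(lkey == UINT32_MAX))
 *       lkey = mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl,
 *                                 addr, mr_ext_memseg_en);
 */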
1053 * Release all the created MRs and resources in the global MR cache of a device.
1056 * @param share_cache
1057 * Pointer to a global shared MR cache.
1060 mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
1062 struct mlx5_mr *mr_next;
1064 rte_rwlock_write_lock(&share_cache->rwlock);
1065 /* Detach from MR list and move to free list. */
1066 mr_next = LIST_FIRST(&share_cache->mr_list);
1067 while (mr_next != NULL) {
1068 struct mlx5_mr *mr = mr_next;
1070 mr_next = LIST_NEXT(mr, mr);
1071 LIST_REMOVE(mr, mr);
1072 LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
1074 LIST_INIT(&share_cache->mr_list);
1075 /* Free global cache. */
1076 mlx5_mr_btree_free(&share_cache->cache);
1077 rte_rwlock_write_unlock(&share_cache->rwlock);
1078 /* Free all remaining MRs. */
1079 mlx5_mr_garbage_collect(share_cache);
1083 * Initialize global MR cache of a device.
1085 * @param share_cache
1086 * Pointer to a global shared MR cache.
1088 * NUMA socket on which memory must be allocated.
1091 * 0 on success, a negative errno value otherwise and rte_errno is set.
1094 mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
1096 /* Set the reg_mr and dereg_mr callback functions */
1097 mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
1098 &share_cache->dereg_mr_cb);
1099 rte_rwlock_init(&share_cache->rwlock);
1100 rte_rwlock_init(&share_cache->mprwlock);
1101 share_cache->mp_cb_registered = 0;
1102 /* Initialize B-tree and allocate memory for global MR cache table. */
1103 return mlx5_mr_btree_init(&share_cache->cache,
1104 MLX5_MR_BTREE_CACHE_N * 2, socket);
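/*
 * Lifecycle sketch (hypothetical device setup/teardown, assuming a
 * "share_cache" instance embedded in the device structure):
 *
 *   if (mlx5_mr_create_cache(&share_cache, socket) != 0)
 *       goto error;
 *   ... register memory and run the datapath ...
 *   mlx5_mr_release_cache(&share_cache);
 *
 * mlx5_mr_release_cache() moves all MRs to the free list and then calls
 * mlx5_mr_garbage_collect() so that deregistration happens outside the lock.
 */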
1108 * Flush all of the local cache entries.
1111 * Pointer to per-queue MR local cache.
1114 mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
1116 /* Reset the most-recently-used index. */
1118 /* Reset the linear search array. */
1120 memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
1121 /* Reset the B-tree table. */
1122 mr_ctrl->cache_bh.len = 1;
1123 mr_ctrl->cache_bh.overflow = 0;
1124 /* Update the generation number. */
1125 mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
1126 DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
1127 (void *)mr_ctrl, mr_ctrl->cur_gen);
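/*
 * Invalidation pattern (sketch): the control path bumps share_cache->dev_gen
 * after rebuilding the global cache, and each queue compares its cur_gen
 * against that shared counter before a lookup, flushing its local caches
 * when they differ (see mlx5_mr_mb2mr() below):
 *
 *   if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *       mlx5_mr_flush_local_cache(mr_ctrl);
 */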
1131 * Creates a memory region for external memory, that is memory which is not
1132 * part of the DPDK memory segments.
1135 * Pointer to pd of a device (net, regex, vdpa,...).
1137 * Starting virtual address of memory.
1139 * Length of memory segment being mapped.
1141 * Socket to allocate heap memory for the control structures.
1144 * Pointer to MR structure on success, NULL otherwise.
1147 mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
1148 mlx5_reg_mr_t reg_mr_cb)
1150 struct mlx5_mr *mr = NULL;
1152 mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1153 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
1154 RTE_CACHE_LINE_SIZE, socket_id);
1157 reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
1158 if (mr->pmd_mr.obj == NULL) {
1160 "Failed to create an MR for address (%p)",
1165 mr->msl = NULL; /* Mark it as external memory. */
1170 "MR CREATED (%p) for external memory %p:\n"
1171 " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
1172 " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
1173 (void *)mr, (void *)addr,
1174 addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1175 mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
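/*
 * Hedged usage sketch for external memory (names and locking are illustrative;
 * callers such as the DMA map path typically also link the new MR into the
 * shared cache so datapath lookups can find it):
 *
 *   mr = mlx5_create_mr_ext(pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
 *                           share_cache->reg_mr_cb);
 *   if (mr == NULL)
 *       return -1;
 *   rte_rwlock_write_lock(&share_cache->rwlock);
 *   LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
 *   mlx5_mr_insert_cache(share_cache, mr);
 *   rte_rwlock_write_unlock(&share_cache->rwlock);
 */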
1180 * Callback for the memory free event. Iterate over the freed memsegs and check
1181 * whether each belongs to an existing MR. If found, clear the corresponding bit
1182 * in the MR's bitmap. As a result, the MR becomes fragmented. If it becomes
1183 * empty, the MR will be freed later by mlx5_mr_garbage_collect(). Even if this
1184 * callback is called from a secondary process, the garbage collector is run in
1185 * the primary process, as the secondary process can't call mlx5_mr_create().
1187 * The global cache must be rebuilt if there's any change and this event has to
1188 * be propagated to dataplane threads to flush the local caches.
1190 * @param share_cache
1191 * Pointer to a global shared MR cache.
1193 * Name of ibv device.
1195 * Address of freed memory.
1197 * Size of freed memory.
1200 mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
1201 const char *ibdev_name, const void *addr, size_t len)
1203 const struct rte_memseg_list *msl;
1209 DRV_LOG(DEBUG, "device %s free callback: addr=%p, len=%zu",
1210 ibdev_name, addr, len);
1211 msl = rte_mem_virt2memseg_list(addr);
1212 /* addr and len must be page-aligned. */
1213 MLX5_ASSERT((uintptr_t)addr ==
1214 RTE_ALIGN((uintptr_t)addr, msl->page_sz));
1215 MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
1216 ms_n = len / msl->page_sz;
1217 rte_rwlock_write_lock(&share_cache->rwlock);
1218 /* Clear bits of freed memsegs from MR. */
1219 for (i = 0; i < ms_n; ++i) {
1220 const struct rte_memseg *ms;
1221 struct mr_cache_entry entry;
1226 /* Find MR having this memseg. */
1227 start = (uintptr_t)addr + i * msl->page_sz;
1228 mr = mlx5_mr_lookup_list(share_cache, &entry, start);
1231 MLX5_ASSERT(mr->msl); /* Can't be external memory. */
1232 ms = rte_mem_virt2memseg((void *)start, msl);
1233 MLX5_ASSERT(ms != NULL);
1234 MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
1235 ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
1236 pos = ms_idx - mr->ms_base_idx;
1237 MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
1238 MLX5_ASSERT(pos < mr->ms_bmp_n);
1239 DRV_LOG(DEBUG, "device %s MR(%p): clear bitmap[%u] for addr %p",
1240 ibdev_name, (void *)mr, pos, (void *)start);
1241 rte_bitmap_clear(mr->ms_bmp, pos);
1242 if (--mr->ms_n == 0) {
1243 LIST_REMOVE(mr, mr);
1244 LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
1245 DRV_LOG(DEBUG, "device %s remove MR(%p) from list",
1246 ibdev_name, (void *)mr);
1249 * The MR is fragmented or will be freed. The global cache must be
1255 mlx5_mr_rebuild_cache(share_cache);
1257 * No explicit wmb is needed after updating dev_gen because
1258 * the store-release ordering of the unlock provides the
1259 * implicit barrier at the software-visible level.
1261 ++share_cache->dev_gen;
1262 DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
1263 share_cache->dev_gen);
1265 rte_rwlock_write_unlock(&share_cache->rwlock);
1269 * Dump all the created MRs and the global cache entries.
1271 * @param share_cache
1272 * Pointer to a global shared MR cache.
1275 mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
1277 #ifdef RTE_LIBRTE_MLX5_DEBUG
1282 rte_rwlock_read_lock(&share_cache->rwlock);
1283 /* Iterate all the existing MRs. */
1284 LIST_FOREACH(mr, &share_cache->mr_list, mr) {
1287 DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
1288 mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
1289 mr->ms_n, mr->ms_bmp_n);
1292 for (n = 0; n < mr->ms_bmp_n; ) {
1293 struct mr_cache_entry ret = { 0, };
1295 n = mr_find_next_chunk(mr, &ret, n);
1299 " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
1300 chunk_n++, ret.start, ret.end);
1303 DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
1304 mlx5_mr_btree_dump(&share_cache->cache);
1305 rte_rwlock_read_unlock(&share_cache->rwlock);
1310 mlx5_range_compare_start(const void *lhs, const void *rhs)
1312 const struct mlx5_range *r1 = lhs, *r2 = rhs;
1314 if (r1->start > r2->start)
1316 else if (r1->start < r2->start)
1322 mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,
1323 struct rte_mempool_memhdr *memhdr,
1326 struct mlx5_range *ranges = opaque, *range = &ranges[idx];
1327 uint64_t page_size = rte_mem_page_size();
1330 range->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);
1331 range->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);
1335 * Get VA-contiguous ranges of the mempool memory.
1336 * Each range start and end is aligned to the system page size.
1341 * Receives the ranges; the caller must release them with free().
1342 * @param[out] out_n
1343 * Receives the number of @p out elements.
1346 * 0 on success, (-1) on failure.
1349 mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
1350 unsigned int *out_n)
1352 struct mlx5_range *chunks;
1353 unsigned int chunks_n = mp->nb_mem_chunks, contig_n, i;
1355 /* Collect page-aligned memory ranges of the mempool. */
1356 chunks = calloc(chunks_n, sizeof(chunks[0]));
1359 rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);
1360 /* Merge adjacent chunks and place them at the beginning. */
1361 qsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);
1363 for (i = 1; i < chunks_n; i++)
1364 if (chunks[i - 1].end != chunks[i].start) {
1365 chunks[contig_n - 1].end = chunks[i - 1].end;
1366 chunks[contig_n] = chunks[i];
1369 /* Extend the last contiguous chunk to the end of the mempool. */
1370 chunks[contig_n - 1].end = chunks[i - 1].end;
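/*
 * Worked example (illustrative addresses): with chunks [0x1000, 0x3000),
 * [0x3000, 0x6000) and [0x9000, 0xa000), the first two are adjacent and
 * collapse into one range [0x1000, 0x6000), while the third stays separate,
 * so the function reports two contiguous ranges:
 * [0x1000, 0x6000) and [0x9000, 0xa000).
 */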
1377 * Analyze mempool memory to select memory ranges to register.
1380 * Mempool to analyze.
1382 * Receives memory ranges to register, aligned to the system page size.
1383 * The caller must release them with free().
1385 * Receives the number of @p out items.
1386 * @param[out] share_hugepage
1387 * Receives True if the entire pool resides within a single hugepage.
1390 * 0 on success, (-1) on failure.
1393 mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
1394 unsigned int *out_n, bool *share_hugepage)
1396 struct mlx5_range *ranges = NULL;
1397 unsigned int i, ranges_n = 0;
1398 struct rte_memseg_list *msl;
1400 if (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {
1401 DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
1405 /* Check if the hugepage of the pool can be shared. */
1406 *share_hugepage = false;
1407 msl = rte_mem_virt2memseg_list((void *)ranges[0].start);
1409 uint64_t hugepage_sz = 0;
1411 /* Check that all ranges are on pages of the same size. */
1412 for (i = 0; i < ranges_n; i++) {
1413 if (hugepage_sz != 0 && hugepage_sz != msl->page_sz)
1415 hugepage_sz = msl->page_sz;
1417 if (i == ranges_n) {
1419 * If the entire pool is within one hugepage,
1420 * combine all ranges into one of the hugepage size.
1422 uintptr_t reg_start = ranges[0].start;
1423 uintptr_t reg_end = ranges[ranges_n - 1].end;
1424 uintptr_t hugepage_start =
1425 RTE_ALIGN_FLOOR(reg_start, hugepage_sz);
1426 uintptr_t hugepage_end = hugepage_start + hugepage_sz;
1427 if (reg_end < hugepage_end) {
1428 ranges[0].start = hugepage_start;
1429 ranges[0].end = hugepage_end;
1431 *share_hugepage = true;
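/*
 * Example (illustrative addresses): if all ranges of the pool lie inside one
 * 2 MiB hugepage starting at 0x7f0000200000, e.g.
 * [0x7f0000210000, 0x7f0000380000), the single registered range is widened to
 * the whole hugepage [0x7f0000200000, 0x7f0000400000) and *share_hugepage is
 * set to true, so the resulting MR can later be shared with other mempools
 * placed in the same hugepage.
 */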
1440 /** Create a registration object for the mempool. */
1441 static struct mlx5_mempool_reg *
1442 mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
1444 struct mlx5_mempool_reg *mpr = NULL;
1446 mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1447 sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),
1448 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1450 DRV_LOG(ERR, "Cannot allocate mempool %s registration object",
1455 mpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);
1461 * Destroy a mempool registration object.
1464 * Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
1467 mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
1468 struct mlx5_mempool_reg *mpr, bool standalone)
1473 for (i = 0; i < mpr->mrs_n; i++)
1474 share_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);
1479 /** Find registration object of a mempool. */
1480 static struct mlx5_mempool_reg *
1481 mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,
1482 struct rte_mempool *mp)
1484 struct mlx5_mempool_reg *mpr;
1486 LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1492 /** Increment reference counters of MRs used in the registration. */
1494 mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)
1498 for (i = 0; i < mpr->mrs_n; i++)
1499 __atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
1503 * Decrement reference counters of MRs used in the registration.
1505 * @return True if no more references to @p mpr MRs exist, False otherwise.
1508 mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
1513 for (i = 0; i < mpr->mrs_n; i++)
1514 ret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,
1515 __ATOMIC_RELAXED) == 0;
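/*
 * Pairing note (sketch): mlx5_mempool_reg_attach() is called when a
 * registration starts using a set of MRs (possibly shared with another
 * mempool on the same hugepage), and mlx5_mempool_reg_detach() when it is
 * removed. Only when detach reports that the last reference is gone may the
 * caller pass standalone == true to mlx5_mempool_reg_destroy() so that the
 * underlying MRs are actually deregistered:
 *
 *   standalone = mlx5_mempool_reg_detach(mpr);
 *   ...
 *   mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
 */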
1520 mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
1521 void *pd, struct rte_mempool *mp)
1523 struct mlx5_range *ranges = NULL;
1524 struct mlx5_mempool_reg *mpr, *new_mpr;
1525 unsigned int i, ranges_n;
1526 bool share_hugepage;
1529 /* Early check to avoid unnecessary creation of MRs. */
1530 rte_rwlock_read_lock(&share_cache->rwlock);
1531 mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1532 rte_rwlock_read_unlock(&share_cache->rwlock);
1534 DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1539 if (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,
1540 &share_hugepage) < 0) {
1541 DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
1545 new_mpr = mlx5_mempool_reg_create(mp, ranges_n);
1546 if (new_mpr == NULL) {
1548 "Cannot create a registration object for mempool %s in PD %p",
1554 * If the entire mempool fits in a single hugepage, the MR for this
1555 * hugepage can be shared across mempools that also fit in it.
1557 if (share_hugepage) {
1558 rte_rwlock_write_lock(&share_cache->rwlock);
1559 LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
1560 if (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)
1564 new_mpr->mrs = mpr->mrs;
1565 mlx5_mempool_reg_attach(new_mpr);
1566 LIST_INSERT_HEAD(&share_cache->mempool_reg_list,
1569 rte_rwlock_write_unlock(&share_cache->rwlock);
1571 DRV_LOG(DEBUG, "Shared MR %#x in PD %p for mempool %s with mempool %s",
1572 mpr->mrs[0].pmd_mr.lkey, pd, mp->name,
1578 for (i = 0; i < ranges_n; i++) {
1579 struct mlx5_mempool_mr *mr = &new_mpr->mrs[i];
1580 const struct mlx5_range *range = &ranges[i];
1581 size_t len = range->end - range->start;
1583 if (share_cache->reg_mr_cb(pd, (void *)range->start, len,
1586 "Failed to create an MR in PD %p for address range "
1587 "[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1588 pd, range->start, range->end, len, mp->name);
1592 "Created a new MR %#x in PD %p for address range "
1593 "[0x%" PRIxPTR ", 0x%" PRIxPTR "] (%zu bytes) for mempool %s",
1594 mr->pmd_mr.lkey, pd, range->start, range->end, len,
1597 if (i != ranges_n) {
1598 mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1602 /* Concurrent registration is not supposed to happen. */
1603 rte_rwlock_write_lock(&share_cache->rwlock);
1604 mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1606 mlx5_mempool_reg_attach(new_mpr);
1607 LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
1610 rte_rwlock_write_unlock(&share_cache->rwlock);
1612 DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
1614 mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
1624 mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,
1625 void *pd, struct rte_mempool *mp,
1626 struct mlx5_mp_id *mp_id)
1628 if (mp_id == NULL) {
1632 return mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);
1636 * Register the memory of a mempool in the protection domain.
1638 * @param share_cache
1639 * Shared MR cache of the protection domain.
1641 * Protection domain object.
1643 * Mempool to register.
1645 * Multi-process identifier, may be NULL for the primary process.
1648 * 0 on success, (-1) on failure and rte_errno is set.
1651 mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
1652 struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
1654 if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1656 switch (rte_eal_process_type()) {
1657 case RTE_PROC_PRIMARY:
1658 return mlx5_mr_mempool_register_primary(share_cache, pd, mp);
1659 case RTE_PROC_SECONDARY:
1660 return mlx5_mr_mempool_register_secondary(share_cache, pd, mp,
1668 mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,
1669 struct rte_mempool *mp)
1671 struct mlx5_mempool_reg *mpr;
1672 bool standalone = false;
1674 rte_rwlock_write_lock(&share_cache->rwlock);
1675 LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)
1676 if (mpr->mp == mp) {
1677 LIST_REMOVE(mpr, next);
1678 standalone = mlx5_mempool_reg_detach(mpr);
1681 * The unlock operation below provides a memory
1682 * barrier due to its store-release semantics.
1684 ++share_cache->dev_gen;
1687 rte_rwlock_write_unlock(&share_cache->rwlock);
1692 mlx5_mempool_reg_destroy(share_cache, mpr, standalone);
1697 mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,
1698 struct rte_mempool *mp,
1699 struct mlx5_mp_id *mp_id)
1701 if (mp_id == NULL) {
1705 return mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);
1709 * Unregister the memory of a mempool from the protection domain.
1711 * @param share_cache
1712 * Shared MR cache of the protection domain.
1714 * Mempool to unregister.
1716 * Multi-process identifier, may be NULL for the primary process.
1719 * 0 on success, (-1) on failure and rte_errno is set.
1722 mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
1723 struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
1725 if (mp->flags & RTE_MEMPOOL_F_NON_IO)
1727 switch (rte_eal_process_type()) {
1728 case RTE_PROC_PRIMARY:
1729 return mlx5_mr_mempool_unregister_primary(share_cache, mp);
1730 case RTE_PROC_SECONDARY:
1731 return mlx5_mr_mempool_unregister_secondary(share_cache, mp,
1739 * Look up an MR key by an address in a registered mempool.
1742 * Mempool registration object.
1744 * Address within the mempool.
1746 * Bottom-half cache entry to fill.
1749 * MR key or UINT32_MAX on failure, which can only happen
1750 * if the address is not from within the mempool.
1753 mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,
1754 struct mr_cache_entry *entry)
1756 uint32_t lkey = UINT32_MAX;
1759 for (i = 0; i < mpr->mrs_n; i++) {
1760 const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;
1761 uintptr_t mr_addr = (uintptr_t)mr->addr;
1763 if (mr_addr <= addr) {
1764 lkey = rte_cpu_to_be_32(mr->lkey);
1765 entry->start = mr_addr;
1766 entry->end = mr_addr + mr->len;
1775 * Update bottom-half cache from the list of mempool registrations.
1777 * @param share_cache
1778 * Pointer to a global shared MR cache.
1780 * Per-queue MR control handle.
1782 * Pointer to an entry in the bottom-half cache to update
1783 * with the MR lkey looked up.
1785 * Mempool containing the address.
1787 * Address to lookup.
1789 * MR lkey on success, UINT32_MAX on failure.
1792 mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
1793 struct mlx5_mr_ctrl *mr_ctrl,
1794 struct mr_cache_entry *entry,
1795 struct rte_mempool *mp, uintptr_t addr)
1797 struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
1798 struct mlx5_mempool_reg *mpr;
1799 uint32_t lkey = UINT32_MAX;
1801 /* If local cache table is full, try to double it. */
1802 if (unlikely(bt->len == bt->size))
1803 mr_btree_expand(bt, bt->size << 1);
1804 /* Look up in mempool registrations. */
1805 rte_rwlock_read_lock(&share_cache->rwlock);
1806 mpr = mlx5_mempool_reg_lookup(share_cache, mp);
1808 lkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);
1809 rte_rwlock_read_unlock(&share_cache->rwlock);
1811 * Update local cache. Even if it fails, return the found entry
1812 * to update top-half cache. Next time, this entry will be found
1813 * in the global cache.
1815 if (lkey != UINT32_MAX)
1816 mr_btree_insert(bt, entry);
1821 * Bottom-half lookup for the address from the mempool.
1823 * @param share_cache
1824 * Pointer to a global shared MR cache.
1826 * Per-queue MR control handle.
1828 * Mempool containing the address.
1830 * Address to lookup.
1832 * MR lkey on success, UINT32_MAX on failure.
1835 mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
1836 struct mlx5_mr_ctrl *mr_ctrl,
1837 struct rte_mempool *mp, uintptr_t addr)
1839 struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
1841 uint16_t bh_idx = 0;
1843 /* Binary-search MR translation table. */
1844 lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
1845 /* Update top-half cache. */
1846 if (likely(lkey != UINT32_MAX)) {
1847 *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
1849 lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
1851 /* Can only fail if the address is not from the mempool. */
1852 if (unlikely(lkey == UINT32_MAX))
1855 /* Update the most recently used entry. */
1856 mr_ctrl->mru = mr_ctrl->head;
1857 /* Point to the next victim, the oldest. */
1858 mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
1863 * Bottom-half of the LKey search. If supported, look up the address through its
1864 * mempool registration; otherwise, search the caches of the legacy mechanism.
1867 * Pointer to mlx5 device.
1869 * Multi-process identifier, may be NULL for the primary process.
1871 * Pointer to per-queue MR control structure.
1876 * Searched LKey on success, UINT32_MAX on no match.
1879 mlx5_mr_mb2mr_bh(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
1880 struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
1883 uintptr_t addr = (uintptr_t)mb->buf_addr;
1885 if (cdev->config.mr_mempool_reg_en) {
1886 struct rte_mempool *mp = NULL;
1887 struct mlx5_mprq_buf *buf;
1889 if (!RTE_MBUF_HAS_EXTBUF(mb)) {
1890 mp = mlx5_mb2mp(mb);
1891 } else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
1892 /* Recover MPRQ mempool. */
1893 buf = mb->shinfo->fcb_opaque;
1897 lkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,
1900 * Lookup can only fail on invalid input, e.g. "addr"
1901 * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
1903 if (lkey != UINT32_MAX)
1906 /* Fallback for generic mechanism in corner cases. */
1908 return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
1909 addr, cdev->config.mr_ext_memseg_en);
1913 * Query LKey from a packet buffer.
1916 * Pointer to the mlx5 device structure.
1918 * Multi-process identifier, may be NULL for the primary process.
1920 * Pointer to per-queue MR control structure.
1925 * Searched LKey on success, UINT32_MAX on no match.
1928 mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
1929 struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
1933 /* Check generation bit to see if there's any change on existing MRs. */
1934 if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
1935 mlx5_mr_flush_local_cache(mr_ctrl);
1936 /* Linear search on MR cache array. */
1937 lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
1938 MLX5_MR_CACHE_N, (uintptr_t)mbuf->buf_addr);
1939 if (likely(lkey != UINT32_MAX))
1941 /* Take slower bottom-half on miss. */
1942 return mlx5_mr_mb2mr_bh(cdev, mp_id, mr_ctrl, mbuf);
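/*
 * Datapath usage sketch (hypothetical Tx path; "txq" and its members are
 * assumptions for illustration only):
 *
 *   uint32_t lkey = mlx5_mr_mb2mr(txq->cdev, &txq->mp_id,
 *                                 &txq->mr_ctrl, mbuf);
 *
 *   if (unlikely(lkey == UINT32_MAX))
 *       return 0;   the buffer is not registered and cannot be posted to HW
 *
 * The returned LKey is already converted to big-endian (see the
 * rte_cpu_to_be_32() calls above), so it can be written to the WQE as is.
 */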