/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_utils.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a rare
	 * case and part of a very slow path, it is acceptable.
	 * Initially cache_bh[] is given practically enough space, and once
	 * it has been expanded, expansion shouldn't be needed again.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mr_cache_entry), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * the searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns the index where the
 *   search stopped so that it can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}

/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}
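
/*
 * Illustrative sketch only, not part of the original driver flow: how the
 * lookup/insert pair above behaves. mr_btree_insert() re-runs the lookup
 * internally, so slow-path callers simply insert; a duplicate start address
 * is silently ignored, and a full table sets bt->overflow via the failed
 * insert.
 */
static void __rte_unused
mr_btree_pair_sketch(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	uint16_t idx = 0;

	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX)
		return; /* Already cached; idx points at the matching slot. */
	/* Miss: idx is where the search stopped; insert places it after. */
	if (mr_btree_insert(bt, entry) < 0)
		DRV_LOG(DEBUG, "B-tree(%p) full, overflow set", (void *)bt);
}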

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mr_cache_entry),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DEBUG("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
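
/*
 * Illustrative sketch only, not called by the driver: typical lifecycle of a
 * per-queue bottom-half cache. MLX5_MR_BTREE_CACHE_N is assumed to be the
 * initial size that callers pick up from mlx5_common_mr.h.
 */
static void __rte_unused
mr_btree_lifecycle_sketch(int socket)
{
	struct mlx5_mr_btree bt = { 0 };

	if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N, socket) != 0)
		return; /* rte_errno is set by mlx5_mr_btree_init(). */
	/* bt now holds only the sentinel entry: [0, 0) with invalid LKey. */
	MLX5_ASSERT(bt.len == 1);
	mlx5_mr_btree_free(&bt);
}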

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
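
/*
 * Illustrative sketch only, not called by the driver: the iteration pattern
 * used throughout this file to enumerate the virtually contiguous chunks of
 * one MR. The returned index is the next bitmap position to resume from.
 */
static void __rte_unused
mr_chunk_iter_sketch(struct mlx5_mr *mr)
{
	unsigned int n;

	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry chunk;

		memset(&chunk, 0, sizeof(chunk));
		n = mr_find_next_chunk(mr, &chunk, n);
		if (!chunk.end)
			break; /* No more chunks set in the bitmap. */
		/* chunk now describes [start, end) with a valid LKey. */
	}
}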

/**
 * Insert an MR to the global B-tree cache. It may fail due to being low on
 * memory; then this entry will have to be searched by mlx5_mr_lookup_list()
 * in mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}
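
/*
 * Illustrative sketch only, not called by the driver: callers outside the
 * datapath take share_cache->rwlock for reading around the cache lookup, as
 * mlx5_mr_create_secondary() below does.
 */
static uint32_t __rte_unused
mr_shared_lookup_sketch(struct mlx5_mr_share_cache *share_cache,
			uintptr_t addr)
{
	struct mr_cache_entry entry;
	uint32_t lkey;

	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mlx5_mr_lookup_cache(share_cache, &entry, addr);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* UINT32_MAX means the address is not registered yet. */
	return lkey;
}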

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function will
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx5_mr *mr)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	if (mr->pmd_mr.obj != NULL)
		claim_zero(mlx5_glue->dereg_mr(mr->pmd_mr.obj));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

/**
 * Rebuild the global B-tree cache of a device from the original MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could call the memory free callback function. That would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process; a request is then sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is on the shared memory, the following LKey lookup should succeed
 * unless the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DEBUG("port %u requesting MR creation for address (%p)",
	      mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DEBUG("Fail to request MR creation for address (%p)",
		      (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DEBUG("MR CREATED by primary process for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      (void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;
	struct ibv_mr *ibv_mr;

	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but couldn't be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even though
	 * the system memory is highly fragmented. As the whole memory chunk
	 * will be pinned by kernel, it can't be reused unless the entire
	 * chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DEBUG("Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DEBUG("Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any memory
	 * related calls in a critical path, the resource allocation above
	 * can't be locked. If the memory has been changed at this point, try
	 * again with just a single page. If not, go on with the big chunk
	 * atomically from here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DEBUG("Unable to find virtually contiguous"
		      " chunk for address (%p)."
		      " rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mr_free(mr);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DEBUG("Found MR for %p on final lookup, abort", (void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	ibv_mr = mlx5_glue->reg_mr(pd, (void *)data.start, len,
				   IBV_ACCESS_LOCAL_WRITE |
				   (haswell_broadwell_cpu ? 0 :
				    IBV_ACCESS_RELAXED_ORDERING));
	if (ibv_mr == NULL) {
		DEBUG("Fail to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	mr->pmd_mr.lkey = ibv_mr->lkey;
	mr->pmd_mr.addr = ibv_mr->addr;
	mr->pmd_mr.len = ibv_mr->length;
	mr->pmd_mr.obj = ibv_mr;
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DEBUG("MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}
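
/*
 * Illustrative sketch only, not called by the driver: the memseg validity
 * check described in the bitmap comment above — a memseg with index ms_idx
 * in mr->msl is covered by this MR iff its bit is set relative to
 * ms_base_idx. Not meaningful for external-memory MRs (mr->msl == NULL).
 */
static int __rte_unused
mr_covers_memseg_sketch(struct mlx5_mr *mr, uint32_t ms_idx)
{
	MLX5_ASSERT(mr->msl != NULL);
	if (ms_idx < mr->ms_base_idx ||
	    ms_idx >= mr->ms_base_idx + mr->ms_bmp_n)
		return 0;
	return rte_bitmap_get(mr->ms_bmp, ms_idx - mr->ms_base_idx) != 0;
}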

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary and the secondary process.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to the local bottom-half cache table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if successfully created a new global MR. Even
	 * if failed to create one, there's no action to take in this datapath
	 * code. As the returned LKey is invalid, this will eventually make HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; the local cache_bh[] will be updated inside if
		 * possible. The top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
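
/*
 * Illustrative sketch only, not called by the driver: how a datapath user
 * resolves the LKey for a buffer address. The top-half linear cache is
 * checked by per-PMD inline helpers; this bottom half does the B-tree
 * lookup, then the global cache, and creates a new MR on a complete miss.
 * Passing 1 for mr_ext_memseg_en extends registration over the whole
 * contiguous chunk.
 */
static uint32_t __rte_unused
mr_addr2lkey_sketch(void *pd, struct mlx5_mp_id *mp_id,
		    struct mlx5_mr_share_cache *share_cache,
		    struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr, 1);
}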

/**
 * Release all the created MRs and resources on global MR cache of a device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
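
/*
 * Illustrative sketch only, an assumption about how datapath code outside
 * this file uses the generation number: when the device generation has moved
 * ahead of the local copy (memory was freed and the global cache rebuilt),
 * the per-queue caches must be flushed before the next lookup.
 */
static void __rte_unused
mr_check_generation_sketch(struct mlx5_mr_ctrl *mr_ctrl)
{
	if (unlikely(mr_ctrl->cur_gen != *mr_ctrl->dev_gen_ptr))
		mlx5_mr_flush_local_cache(mr_ctrl);
}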

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id)
{
	struct ibv_mr *ibv_mr;
	struct mlx5_mr *mr = NULL;

	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	ibv_mr = mlx5_glue->reg_mr(pd, (void *)addr, len,
				   IBV_ACCESS_LOCAL_WRITE |
				   (haswell_broadwell_cpu ? 0 :
				    IBV_ACCESS_RELAXED_ORDERING));
	if (ibv_mr == NULL) {
		DRV_LOG(WARNING,
			"Fail to create MR for address (%p)",
			(void *)addr);
		rte_free(mr);
		return NULL;
	}
	mr->pmd_mr.lkey = ibv_mr->lkey;
	mr->pmd_mr.addr = ibv_mr->addr;
	mr->pmd_mr.len = ibv_mr->length;
	mr->pmd_mr.obj = ibv_mr;
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
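
/*
 * Illustrative sketch only, not called by the driver: registering external
 * (non-EAL) memory and publishing it through the shared cache, roughly what
 * the PMD DMA-map hooks do. Error handling is elided.
 */
static void __rte_unused
mr_register_external_sketch(void *pd,
			    struct mlx5_mr_share_cache *share_cache,
			    uintptr_t addr, size_t len, int socket_id)
{
	struct mlx5_mr *mr = mlx5_create_mr_ext(pd, addr, len, socket_id);

	if (mr == NULL)
		return;
	rte_rwlock_write_lock(&share_cache->rwlock);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	rte_rwlock_write_unlock(&share_cache->rwlock);
}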

/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		DEBUG("MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("Dumping global cache %p", (void *)share_cache);
	mlx5_mr_btree_dump(&share_cache->cache);
	rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}