/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a quite
	 * rare case and part of a very slow path, it is acceptable.
	 * Initially cache_bh[] is given practically enough space and, once
	 * expanded, expansion should never be needed again.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	return UINT32_MAX;
}
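
/*
 * Illustrative use of the index output (a sketch, not part of the driver):
 * on a miss, '*idx' is the slot of the last entry whose 'start' is <= addr,
 * so a caller inserting a new entry places it right after that slot. Names
 * below are hypothetical.
 *
 * @code
 *	uint16_t idx;
 *	uint32_t lkey = mr_btree_lookup(bt, &idx, addr);
 *
 *	if (lkey == UINT32_MAX) {
 *		// Miss: a new entry for 'addr' would go to slot idx + 1,
 *		// shifting the tail of the table (see mr_btree_insert()).
 *	}
 * @endcode
 */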

/**
 * Insert an entry into B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}
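
/*
 * Worked example (illustrative): with a table of [NULL, A, C] where
 * A = [0x1000, 0x2000) and C = [0x3000, 0x4000), inserting
 * B = [0x2000, 0x3000) looks up B->start, stops at A (idx = 1), shifts C
 * one slot right and stores B at slot 2, keeping the table sorted by start
 * address for the binary search above.
 */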

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	return 0;
}
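
/*
 * Lifecycle sketch (illustrative, error handling elided): a per-queue
 * bottom-half cache is typically set up once and torn down with the queue;
 * a size such as MLX5_MR_BTREE_CACHE_N is what callers commonly pass.
 *
 * @code
 *	struct mlx5_mr_btree bt = { 0 };
 *
 *	if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N, socket) < 0)
 *		return -rte_errno;
 *	// ... mr_btree_lookup()/mr_btree_insert() while in use ...
 *	mlx5_mr_btree_free(&bt);
 * @endcode
 */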

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
			" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
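
/*
 * Iteration sketch (illustrative): callers walk all chunks of an MR by
 * feeding the returned index back in and stopping when a chunk comes back
 * empty, exactly as mlx5_mr_insert_cache() below does.
 *
 * @code
 *	unsigned int n;
 *
 *	for (n = 0; n < mr->ms_bmp_n; ) {
 *		struct mr_cache_entry chunk;
 *
 *		memset(&chunk, 0, sizeof(chunk));
 *		n = mr_find_next_chunk(mr, &chunk, n);
 *		if (!chunk.end)
 *			break;
 *		// ... consume [chunk.start, chunk.end) ...
 *	}
 * @endcode
 */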

/**
 * Insert an MR into the global B-tree cache. It may fail due to low memory.
 * Then, this entry will have to be searched by mlx5_mr_lookup_list() in
 * mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of a possible deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}
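
/*
 * Caller pattern (illustrative): the shared cache is protected by
 * share_cache->rwlock; lookups take the read lock, as done in
 * mlx5_mr_create_secondary() below.
 *
 * @code
 *	struct mr_cache_entry entry;
 *	uint32_t lkey;
 *
 *	rte_rwlock_read_lock(&share_cache->rwlock);
 *	lkey = mlx5_mr_lookup_cache(share_cache, &entry, addr);
 *	rte_rwlock_read_unlock(&share_cache->rwlock);
 * @endcode
 */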

/**
 * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free()
 * can raise a memory free event and the callback function will spin on the
 * lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback to deregister the MR from hardware.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MR having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MR can't be freed while holding the lock because rte_free() could
	 * call the memory free callback function. This would be a deadlock
	 * situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}
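
/*
 * Walk sketch (illustrative): rte_memseg_contig_walk() invokes the callback
 * above once per contiguous memseg run; a non-zero callback return stops the
 * walk and is propagated, so a positive result means the chunk containing
 * 'addr' was found.
 *
 * @code
 *	struct mr_find_contig_memsegs_data data = { .addr = addr, };
 *
 *	if (rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data) > 0) {
 *		// [data.start, data.end) is the contiguous chunk on data.msl.
 *	}
 * @endcode
 */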

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process, then a request is sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is on the shared memory, following LKey lookup should succeed unless
 * the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa, ...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
		mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to request MR creation for address (%p)",
			(void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
		(void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa, ...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but couldn't be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even though
	 * the system memory is highly fragmented. As the whole memory chunk
	 * will be pinned by kernel, it can't be reused unless the entire chunk
	 * is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" page_sz=0x%" PRIx64 ", ms_n=%u",
		(void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
			" address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
			" address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there's any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check that the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Failed to create an MR for address (%p)",
			(void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr, data.start, data.end,
		rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from primary and secondary process.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa, ...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa, ...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if it failed to create one, there's no action to take in this
	 * datapath code. As the returned LKey is invalid, this will eventually
	 * make HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa, ...).
 * @param mp_id
 *   Multi-process identifier.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If the local lookup table misses, search in the global cache
		 * and the local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
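
/*
 * Datapath sketch (illustrative): PMDs typically wrap this bottom-half with
 * an inline top-half that scans the small linear mr_ctrl->cache[] array first
 * and falls back here only on miss. 'struct queue' and its fields below are
 * hypothetical stand-ins for a PMD's queue structure.
 *
 * @code
 *	static __rte_always_inline uint32_t
 *	queue_addr2mr(struct queue *q, uintptr_t addr)
 *	{
 *		struct mlx5_mr_ctrl *mr_ctrl = &q->mr_ctrl;
 *		uint16_t i;
 *
 *		// Linear top-half search in the per-queue array.
 *		for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
 *			struct mr_cache_entry *e = &mr_ctrl->cache[i];
 *
 *			if (addr >= e->start && addr < e->end)
 *				return e->lkey;
 *		}
 *		// Miss: fall back to the bottom-half above.
 *		return mlx5_mr_addr2mr_bh(q->pd, &q->mp_id, q->share_cache,
 *					  mr_ctrl, addr, q->mr_ext_memseg_en);
 *	}
 * @endcode
 */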

/**
 * Release all the created MRs and resources on global MR cache of a device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
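
/*
 * Usage sketch (illustrative): datapath code compares the per-queue
 * generation number against the device generation published by the control
 * path, and flushes a stale local cache before searching, along the lines
 * of:
 *
 * @code
 *	if (unlikely(mr_ctrl->cur_gen != *mr_ctrl->dev_gen_ptr))
 *		mlx5_mr_flush_local_cache(mr_ctrl);
 * @endcode
 */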

/**
 * Creates a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa, ...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 * @param reg_mr_cb
 *   Callback to register the MR with hardware.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
			 RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Failed to create MR for address (%p)",
			(void *)addr);
		mlx5_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
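
/*
 * Usage sketch (illustrative, names hypothetical): registering a chunk of
 * application-provided (non-EAL) memory and publishing it in the shared
 * cache would look roughly like:
 *
 * @code
 *	struct mlx5_mr *mr;
 *
 *	mr = mlx5_create_mr_ext(pd, (uintptr_t)ext_buf, ext_len, socket_id,
 *				share_cache->reg_mr_cb);
 *	if (mr == NULL)
 *		return -1;
 *	rte_rwlock_write_lock(&share_cache->rwlock);
 *	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
 *	// Insert to the global cache table.
 *	mlx5_mr_insert_cache(share_cache, mr);
 *	rte_rwlock_write_unlock(&share_cache->rwlock);
 * @endcode
 */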

/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
			mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
			mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DRV_LOG(DEBUG,
				"  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
				chunk_n++, ret.start, ret.end);
		}
	}
	DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
	mlx5_mr_btree_dump(&share_cache->cache);
	rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}