/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>

#include <mlx5_glue.h>

#include "mlx5_rxtx.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx5_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Expand the B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or sh->mr.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * A downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used internally if there is no room to expand. As this is a rare
	 * case on a very slow path, it is acceptable. cache_bh[] is initially
	 * given enough space, so once expanded, further expansion is unlikely
	 * to ever be needed.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up an LKey in the given B-tree lookup table, store the last index and
 * return the searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, it returns the index where the
 *   search stopped so that the index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mlx5_mr_cache *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
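
/*
 * Illustrative sketch (not part of the driver): on a miss the lookup still
 * stores the stop index through *idx, which is how callers such as
 * mr_btree_insert() locate the insertion slot.
 *
 *	uint16_t idx = 0;
 *	uint32_t lkey = mr_btree_lookup(bt, &idx, addr);
 *
 *	if (lkey == UINT32_MAX) {
 *		// Miss: 'idx' is where a new entry would be inserted.
 *	}
 */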

/**
 * Insert an entry into the B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
{
	struct mlx5_mr_cache *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If the table is full, return an error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize a B-tree and allocate memory for the lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mlx5_mr_cache),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DEBUG("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mlx5_mr_cache *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mlx5_mr_cache *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have a memseg list. */
	if (mr->msl == NULL) {
		struct ibv_mr *ibv_mr = mr->ibv_mr;

		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from the memseg list but get it directly
		 * from the verbs MR as there's only one chunk.
		 */
		entry->start = (uintptr_t)ibv_mr->addr;
		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
	}
	return idx;
}
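
/*
 * Chunk iteration pattern used by the callers below (mr_insert_dev_cache()
 * and mr_lookup_dev_list()): the returned index resumes the scan and an empty
 * entry ends it.
 *
 *	for (n = 0; n < mr->ms_bmp_n; ) {
 *		struct mlx5_mr_cache entry;
 *
 *		memset(&entry, 0, sizeof(entry));
 *		n = mr_find_next_chunk(mr, &entry, n);
 *		if (!entry.end)
 *			break;
 *		// Use [entry.start, entry.end) with entry.lkey.
 *	}
 */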

/**
 * Insert an MR into the global B-tree cache. It may fail due to low memory;
 * in that case, the entry will have to be found by mr_lookup_dev_list() in
 * mlx5_mr_create() on a cache miss.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_insert_dev_cache(struct mlx5_ibv_shared *sh, struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "device %s inserting MR(%p) to global cache",
		sh->ibdev_name, (void *)mr);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mlx5_mr_cache entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&sh->mr.cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up an address in the original global MR list.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
static struct mlx5_mr *
mr_lookup_dev_list(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
		   uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &sh->mr.mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx5_mr_cache ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up an address on the device.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mr_lookup_dev(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
	      uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed because it failed to expand the
	 * B-tree table, it can't contain all the existing MRs. The address
	 * then has to be searched by traversing the original MR list instead,
	 * which is a very slow path. Otherwise, the global cache is
	 * all-inclusive.
	 */
	if (!unlikely(sh->mr.cache.overflow)) {
		lkey = mr_btree_lookup(&sh->mr.cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*sh->mr.cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mr_lookup_dev_list(sh, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function will
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx5_mr *mr)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	if (mr->ibv_mr != NULL)
		claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

/**
 * Release resources of detached MRs that have no online entry.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 */
void
mlx5_mr_garbage_collect(struct mlx5_ibv_shared *sh)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MRs can't be freed while holding the lock, because rte_free() could
	 * call the memory free callback function, which would deadlock.
	 */
	rte_rwlock_write_lock(&sh->mr.rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = sh->mr.mr_free_list;
	LIST_INIT(&sh->mr.mr_free_list);
	rte_rwlock_write_unlock(&sh->mr.rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}
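
/*
 * Usage sketch, mirroring the call in mlx5_mr_create_primary() below:
 * rte_memseg_contig_walk() returns 1 when the callback stops the walk (a
 * match was found) and 0 when the whole list was walked without a match.
 *
 *	struct mr_find_contig_memsegs_data data = { .addr = addr, };
 *
 *	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data))
 *		// No contiguous chunk containing 'addr' was found.
 */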

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called by a secondary process; a request is then sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is in shared memory, the following LKey lookup should succeed
 * unless the request fails.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
			 uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	DEBUG("port %u requesting MR creation for address (%p)",
	      dev->data->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(dev, addr);
	if (ret) {
		DEBUG("port %u failed to request MR creation for address (%p)",
		      dev->data->port_id, (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&priv->sh->mr.rwlock);
	/* Fill in output data. */
	mr_lookup_dev(priv->sh, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&priv->sh->mr.rwlock);
	DEBUG("port %u MR CREATED by primary process for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      dev->data->port_id, (void *)addr,
	      entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 * This must be called from the primary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
		       uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	size_t len;
	uint32_t ms_n;
	uint32_t bmp_size;
	void *bmp_mem;
	int ms_idx_shift = -1;
	uint32_t n;
	struct mr_find_contig_memsegs_data data = {
		.addr = addr,
	};
	struct mr_find_contig_memsegs_data data_re;

	DRV_LOG(DEBUG, "port %u creating an MR using address (%p)",
		dev->data->port_id, (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or sh->mr.rwlock. MRs on the free list
	 * have been detached by the memory free event but could not be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(sh);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case, where mempools are not dynamically
	 * recreated and '--socket-mem' is specified as an EAL option, it is
	 * very likely to have only one MR (LKey) per socket and per hugepage
	 * size even though the system memory is highly fragmented. As the
	 * whole memory chunk will be pinned by the kernel, it can't be reused
	 * unless the entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized, but it may drop performance if there
	 * are many MRs to look up on the datapath.
	 */
	if (!config->mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"port %u unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.",
			dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      dev->data->port_id, (void *)addr,
	      data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DEBUG("port %u unable to allocate memory for a new MR of"
		      " address (%p).",
		      dev->data->port_id, (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = data.msl;
	/*
	 * Save the index of the first memseg and initialize the memseg bitmap.
	 * To see if a memseg of ms_idx in the memseg-list is still valid,
	 * check: rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DEBUG("port %u unable to initialize bitmap for a new MR of"
		      " address (%p).",
		      dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any
	 * memory-related calls in a critical path, the resource allocation
	 * above can't be locked. If the memory has been changed at this point,
	 * try again with just a single page. If not, go on with the big chunk
	 * atomically from here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DEBUG("port %u unable to find virtually contiguous"
		      " chunk for address (%p)."
		      " rte_memseg_contig_walk() failed.",
		      dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mr_free(mr);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&sh->mr.rwlock);
	/*
	 * Check whether the address is really missing. If another thread
	 * already created one or it is not found due to overflow, abort and
	 * return.
	 */
	if (mr_lookup_dev(sh, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&sh->mr.cache, entry);
		DEBUG("port %u found MR for %p on final lookup, abort",
		      dev->data->port_id, (void *)addr);
		rte_rwlock_write_unlock(&sh->mr.rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. The bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mlx5_mr_cache ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mr_lookup_dev(sh, &ret, start) == UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern(), which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	mr->ibv_mr = mlx5_glue->reg_mr(sh->pd, (void *)data.start, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		DEBUG("port %u failed to create a verbs MR for address (%p)",
		      dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
	MLX5_ASSERT(mr->ibv_mr->length == len);
	LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
	DEBUG("port %u MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      dev->data->port_id, (void *)mr, (void *)addr,
	      data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(sh, mr);
	/* Fill in output data. */
	mr_lookup_dev(sh, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&sh->mr.rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&sh->mr.rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called on a datapath, a warning
	 * message per error is preferable. Must be unlocked before calling
	 * rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary and secondary processes.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
	       uintptr_t addr)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(dev, entry, addr);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(dev, entry, addr);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Rebuild the global B-tree cache of the device from the original MR list.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 */
static void
mr_rebuild_dev_cache(struct mlx5_ibv_shared *sh)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "device %s rebuild dev cache[]", sh->ibdev_name);
	/* Flush cache to rebuild. */
	sh->mr.cache.len = 1;
	sh->mr.cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &sh->mr.mr_list, mr)
		if (mr_insert_dev_cache(sh, mr) < 0)
			return;
}

/**
 * Callback for a memory free event. Iterate over the freed memsegs and check
 * whether each belongs to an existing MR. If found, clear the bit in the MR's
 * bitmap. As a result, the MR becomes fragmented. If it turns empty, the MR
 * will be freed later by mlx5_mr_garbage_collect(). Even if this callback is
 * called from a secondary process, the garbage collector will be called in
 * the primary process, as the secondary process can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has
 * to be propagated to dataplane threads to flush the local caches.
 *
 * @param sh
 *   Pointer to the Ethernet device shared context.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
			  const void *addr, size_t len)
{
	const struct rte_memseg_list *msl;
	struct mlx5_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DEBUG("device %s free callback: addr=%p, len=%zu",
	      sh->ibdev_name, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	MLX5_ASSERT((uintptr_t)addr ==
		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&sh->mr.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mlx5_mr_cache entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mr_lookup_dev_list(sh, &entry, start);
		if (mr == NULL)
			continue;
		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		MLX5_ASSERT(ms != NULL);
		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
		MLX5_ASSERT(pos < mr->ms_bmp_n);
		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
		      sh->ibdev_name, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
			DEBUG("device %s remove MR(%p) from list",
			      sh->ibdev_name, (void *)mr);
		}
		/*
		 * MR is fragmented or will be freed. The global cache must be
		 * rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mr_rebuild_dev_cache(sh);
		/*
		 * Flush local caches by propagating invalidation across cores.
		 * rte_smp_wmb() is enough to synchronize this event. If one of
		 * the freed memsegs is seen by another core, that means the
		 * memseg has been allocated by the allocator, which will come
		 * after this free call. Therefore, this store instruction
		 * (incrementing the generation below) will be guaranteed to be
		 * seen by the other core before it sees the newly allocated
		 * memory.
		 */
		++sh->mr.dev_gen;
		DEBUG("broadcasting local cache flush, gen=%d",
		      sh->mr.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&sh->mr.rwlock);
}
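
/*
 * A minimal sketch (assumed consumer, not in this file) of how datapath
 * threads observe the generation bump above: each queue compares its cached
 * generation against the shared one and flushes its local caches on mismatch.
 *
 *	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *		mlx5_mr_flush_local_cache(mr_ctrl);
 */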

/**
 * Callback for a memory event. Must be called from the primary process (see
 * the assertion below); secondary processes propagate MR creation through the
 * primary instead.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 * @param arg
 *   Unused.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_ibv_shared *sh;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(sh, dev_list, mem_event_cb)
			mlx5_mr_mem_event_free_cb(sh, addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Look up an address in the global MR cache table. If not found, create a new
 * MR. Insert the found/created entry into the local bottom-half cache table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   struct mlx5_mr_cache *entry, uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint16_t idx;
	uint32_t lkey;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&sh->mr.rwlock);
	lkey = mr_btree_lookup(&sh->mr.cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*sh->mr.cache.table)[idx];
		rte_rwlock_read_unlock(&sh->mr.rwlock);
		/*
		 * Update the local cache. Even if it fails, return the found
		 * entry to update the top-half cache. Next time, this entry
		 * will be found in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&sh->mr.rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(dev, entry, addr);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if it failed to create one, there's no action to take on this
	 * datapath code. As the returned LKey is invalid, this will eventually
	 * make HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on the datapath. First search in cache_bh[] and
 * if it misses, search the global MR cache table and update the new entry in
 * the per-queue local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search the global
		 * cache; the local cache_bh[] will be updated inside if
		 * possible. The top-half cache entry will also be updated.
		 */
		lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
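
/*
 * Rough sketch of the top-half this bottom-half backs (the real inline
 * helpers live in the Rx/Tx headers; names below are illustrative only):
 *
 *	// Linear scan of the small per-queue array first.
 *	for (i = 0; i < MLX5_MR_CACHE_N; ++i)
 *		if (addr >= cache[i].start && addr < cache[i].end)
 *			return cache[i].lkey;
 *	// Miss: fall back to this bottom-half.
 *	return mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
 */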

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_priv *priv = rxq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx. If it can't be found in the memseg list,
 * register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx5_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
	}
	return lkey;
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param dev
 *   Pointer to the Ethernet device.
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
static struct mlx5_mr *
mlx5_create_mr_ext(struct rte_eth_dev *dev, uintptr_t addr, size_t len,
		   int socket_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;

	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		DRV_LOG(WARNING,
			"port %u failed to create a verbs MR for address (%p)",
			dev->data->port_id, (void *)addr);
		rte_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"port %u MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		dev->data->port_id, (void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}

/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * The externally allocated chunk is registered and an MR is created for it.
 * The MR object is added to the global list. If the memseg list of an MR
 * object (mr->msl) is null, the MR object can be regarded as externally
 * allocated memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range gets different physical memory mapped
 * again, it may cause a crash on the device due to a wrong translation entry.
 * The PMD can't track the free event of the external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx5_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mlx5_mr_cache entry;
	uint32_t lkey;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&sh->mr.rwlock);
	lkey = mr_lookup_dev(sh, &entry, addr);
	rte_rwlock_read_unlock(&sh->mr.rwlock);
	if (lkey != UINT32_MAX)
		return;
	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
		dev->data->port_id, mem_idx, mp->name);
	mr = mlx5_create_mr_ext(dev, addr, len, mp->socket_id);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to allocate a new MR of"
			" mempool (%s).",
			dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	rte_rwlock_write_lock(&sh->mr.rwlock);
	LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(sh, mr);
	rte_rwlock_write_unlock(&sh->mr.rwlock);
	/* Insert to the local cache table. */
	mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
}

/**
 * Find the first ethdev that matches the PCI device.
 * Multiple ethdevs per PCI device only exist with representors. In that case,
 * it is enough to get only one of the ports as they all share the same ibv
 * context.
 *
 * @param pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
pci_dev_to_eth_dev(struct rte_pci_device *pdev)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV_OF(port_id, &pdev->device)
		return &rte_eth_devices[port_id];
	return NULL;
}

/**
 * DPDK callback to DMA map external memory to a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
	     uint64_t iova __rte_unused, size_t len)
{
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;
	struct mlx5_priv *priv;
	struct mlx5_ibv_shared *sh;

	dev = pci_dev_to_eth_dev(pdev);
	if (!dev) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	mr = mlx5_create_mr_ext(dev, (uintptr_t)addr, len, SOCKET_ID_ANY);
	if (!mr) {
		DRV_LOG(WARNING,
			"port %u unable to dma map", dev->data->port_id);
		rte_errno = EINVAL;
		return -1;
	}
	sh = priv->sh;
	rte_rwlock_write_lock(&sh->mr.rwlock);
	LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(sh, mr);
	rte_rwlock_write_unlock(&sh->mr.rwlock);
	return 0;
}
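
/*
 * Hypothetical application-side sketch: external memory is assumed to reach
 * the callback above through the generic EAL mapping API (the buffer, length
 * and page size names are illustrative only).
 *
 *	void *ext_buf = ...;	// externally allocated, page-aligned memory
 *	rte_extmem_register(ext_buf, ext_len, NULL, 0, pgsz);
 *	rte_dev_dma_map(&pdev->device, ext_buf, iova, ext_len);
 */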

/**
 * DPDK callback to DMA unmap external memory from a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
	       uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_ibv_shared *sh;
	struct mlx5_mr *mr;
	struct mlx5_mr_cache entry;

	dev = pci_dev_to_eth_dev(pdev);
	if (!dev) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
				 "to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	rte_rwlock_read_lock(&sh->mr.rwlock);
	mr = mr_lookup_dev_list(sh, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_read_unlock(&sh->mr.rwlock);
		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
				 "to PCI device %p", (uintptr_t)addr,
				 (void *)pdev);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
	DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
	      (void *)mr);
	mr_rebuild_dev_cache(sh);
	/*
	 * Flush local caches by propagating invalidation across cores.
	 * rte_smp_wmb() is enough to synchronize this event. If one of the
	 * freed memsegs is seen by another core, that means the memseg has
	 * been allocated by the allocator, which will come after this free
	 * call. Therefore, this store instruction (incrementing the
	 * generation below) will be guaranteed to be seen by the other core
	 * before it sees the newly allocated memory.
	 */
	++sh->mr.dev_gen;
	DEBUG("broadcasting local cache flush, gen=%d", sh->mr.dev_gen);
	rte_smp_wmb();
	rte_rwlock_read_unlock(&sh->mr.rwlock);
	return 0;
}

/**
 * Register MRs for all the memory chunks in a mempool that has externally
 * allocated memory and fill in the local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MRs for all the memory chunks in a mempool that has externally
 * allocated memory and search for the LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
		      struct rte_mempool *mp)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) from unregistered mempool"
			" having externally allocated memory"
			" in a secondary process; please create the mempool"
			" before rte_eth_dev_start()",
			PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx5_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
				  (uintptr_t)memhdr->addr);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}
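
/*
 * Hypothetical call-site sketch: queue setup code is assumed to pre-populate
 * the per-queue cache for each mempool along these lines.
 *
 *	if (mlx5_mr_update_mp(dev, &rxq->mr_ctrl, mp) < 0)
 *		DRV_LOG(WARNING, "port %u failed to register mempool (%s)",
 *			dev->data->port_id, mp->name);
 */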

/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 */
void
mlx5_mr_dump_dev(struct mlx5_ibv_shared *sh __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&sh->mr.rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &sh->mr.mr_list, mr) {
		unsigned int n;

		DEBUG("device %s MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      sh->ibdev_name, mr_n++,
		      rte_cpu_to_be_32(mr->ibv_mr->lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx5_mr_cache ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("device %s dumping global cache", sh->ibdev_name);
	mlx5_mr_btree_dump(&sh->mr.cache);
	rte_rwlock_read_unlock(&sh->mr.rwlock);
#endif
}

/**
 * Release all the created MRs and resources for the shared device context.
 *
 * @param sh
 *   Pointer to Ethernet device shared context.
 */
void
mlx5_mr_release(struct mlx5_ibv_shared *sh)
{
	struct mlx5_mr *mr_next;

	if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG))
		mlx5_mr_dump_dev(sh);
	rte_rwlock_write_lock(&sh->mr.rwlock);
	/* Detach from the MR list and move to the free list. */
	mr_next = LIST_FIRST(&sh->mr.mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
	}
	LIST_INIT(&sh->mr.mr_list);
	/* Free the global cache. */
	mlx5_mr_btree_free(&sh->mr.cache);
	rte_rwlock_write_unlock(&sh->mr.rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(sh);
}