/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx5_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a rare
	 * case and part of a very slow path, it is acceptable. Initially
	 * cache_bh[] is given practically enough space and once it has been
	 * expanded, it should never need expanding again.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

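/*
 * Example (illustrative, not part of the driver): callers grow the table by
 * simple doubling, as mlx5_mr_lookup_dev() does below for the per-queue
 * bottom-half cache:
 *
 *	if (unlikely(bt->len == bt->size))
 *		mr_btree_expand(bt, bt->size << 1);
 *
 * A failed expansion is tolerated; the entry just can't be cached and the
 * slower lookup path is taken again on the next miss.
 */
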
/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mlx5_mr_cache *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	assert(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
			       lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	assert(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}

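/*
 * Worked example (illustrative values): with the NULL sentinel at index 0,
 * a table holding two registered ranges looks like:
 *
 *	idx  start    end      lkey
 *	 0   0x0      0x0      UINT32_MAX  (sentinel, never matches)
 *	 1   0x10000  0x20000  0x1234
 *	 2   0x40000  0x80000  0x5678
 *
 * The binary search narrows [base, base + n) down to a single entry; an
 * address inside its [start, end) is a hit. Looking up 0x12345 ends on idx 1
 * and returns 0x1234, while 0x30000 also ends on idx 1 but fails the
 * `addr < end` check, so UINT32_MAX is returned and *idx tells the caller
 * where a new entry would be inserted.
 */
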
/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
{
	struct mlx5_mr_cache *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	assert(bt != NULL);
	assert(bt->len <= bt->size);
	assert(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end,
			entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

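/*
 * Continuing the example above (illustrative): inserting [0x30000, 0x40000)
 * finds its slot right after idx 1, then memmove() shifts the upper entries
 * so that `start` stays sorted, which mr_btree_lookup() relies on:
 *
 *	before               after
 *	 1   0x10000...       1   0x10000...
 *	 2   0x40000...       2   0x30000...  (new entry)
 *	                      3   0x40000...  (shifted up)
 */
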
/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mlx5_mr_cache),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(ERR,
			"failed to allocate memory for btree cache on socket %d",
			socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	return 0;
}

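/*
 * A minimal usage sketch (illustrative; MLX5_MR_BTREE_CACHE_N is assumed to
 * be the driver's default table size for per-queue caches):
 *
 *	struct mlx5_mr_btree bt;
 *
 *	if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N,
 *			       SOCKET_ID_ANY) < 0)
 *		return -rte_errno;
 *	...
 *	mlx5_mr_btree_free(&bt);
 */
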
/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
{
	int idx;
	struct mlx5_mr_cache *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mlx5_mr_cache *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG,
			"B-tree(%p)[%u],"
			" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end,
			entry->lkey);
	}
}

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			assert(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
	}
	return idx;
}

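/*
 * Example (illustrative): for a memseg bitmap 1 1 0 1, successive calls
 * return the two contiguous chunks:
 *
 *	n = 0;
 *	n = mr_find_next_chunk(mr, &entry, n);	entry = memsegs 0-1, n == 2
 *	n = mr_find_next_chunk(mr, &entry, n);	entry = memseg 3, n == 4
 *
 * which is exactly how callers iterate: for (n = 0; n < mr->ms_bmp_n; ).
 */
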
/**
 * Insert an MR into the global B-tree cache. It may fail due to a lack of
 * memory; then this entry will have to be searched by mr_lookup_dev_list()
 * in mlx5_mr_create() on miss.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int n;

	DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
		dev->data->port_id, (void *)mr);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mlx5_mr_cache entry = { 0, };

		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
static struct mlx5_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
		   uintptr_t addr)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx5_mr_cache ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
	      uintptr_t addr)
{
	struct priv *priv = dev->data->dev_private;
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead,
	 * which is a very slow path. Otherwise, the global cache is all
	 * inclusive.
	 */
	if (!unlikely(priv->mr.cache.overflow)) {
		lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*priv->mr.cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mr_lookup_dev_list(dev, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	assert(lkey == UINT32_MAX || (addr >= entry->start &&
				      addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function will
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx5_mr *mr)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	if (mr->ibv_mr != NULL)
		claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could call the memory free callback function. This would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = priv->mr.mr_free_list;
	LIST_INIT(&priv->mr.mr_free_list);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
	       uintptr_t addr)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	size_t len;
	uint32_t ms_n;
	uint32_t bmp_size;
	void *bmp_mem;
	int ms_idx_shift = -1;
	uint32_t n;
	struct mr_find_contig_memsegs_data data = {
		.addr = addr,
	};
	struct mr_find_contig_memsegs_data data_re;

	DRV_LOG(DEBUG, "port %u creating an MR using address (%p)",
		dev->data->port_id, (void *)addr);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) of unregistered mempool"
			" in secondary process, please create mempool"
			" before rte_eth_dev_start()",
			dev->data->port_id, (void *)addr);
		rte_errno = EPERM;
		goto err_nolock;
	}
	/*
	 * Release detached MRs if any. This can't be done while holding
	 * either memory_hotplug_lock or priv->mr.rwlock. MRs on the free list
	 * were detached by the memory free event but couldn't be released
	 * inside the callback due to the deadlock risk. As a result,
	 * releasing resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(dev);
	/*
	 * Find out a contiguous virtual address chunk in use, to which the
	 * given address belongs, in order to register the maximum range. In
	 * the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even though
	 * the system memory is highly fragmented.
	 */
	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"port %u unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.",
			dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
	assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	assert(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG,
		"port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" page_sz=0x%" PRIx64 ", ms_n=%u",
		dev->data->port_id, (void *)addr,
		data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to allocate memory for a new MR of"
			" address (%p).",
			dev->data->port_id, (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to initialize bitmap for a new MR of"
			" address (%p).",
			dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Recheck whether the extended contiguous chunk is still valid.
	 * memory_hotplug_lock can't be held across memory-related calls in a
	 * critical path, so the resource allocation above couldn't be locked.
	 * If the memory layout has changed at this point, try again with just
	 * a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(WARNING,
			"port %u unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.",
			dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		mr_free(mr);
		goto alloc_resources;
	}
	assert(data.msl == data_re.msl);
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/*
	 * Check that the address is really missing. If another thread has
	 * already created one or it is not found due to overflow, abort and
	 * return.
	 */
	if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&priv->mr.cache, entry);
		DRV_LOG(DEBUG,
			"port %u found MR for %p on final lookup, abort",
			dev->data->port_id, (void *)addr);
		rte_rwlock_write_unlock(&priv->mr.rwlock);
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mlx5_mr_cache ret = { 0, };

		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called with holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		DRV_LOG(WARNING,
			"port %u failed to create a verbs MR for address (%p)",
			dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	assert((uintptr_t)mr->ibv_mr->addr == data.start);
	assert(mr->ibv_mr->length == len);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	DRV_LOG(DEBUG,
		"port %u MR CREATED (%p) for %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		dev->data->port_id, (void *)mr, (void *)addr,
		data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	/* Fill in output data. */
	mr_lookup_dev(dev, entry, addr);
	/* Lookup can't fail. */
	assert(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&priv->mr.rwlock);
err_memlock:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be
	 * called inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}

/**
 * Rebuild the global B-tree cache of device from the original MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
	/* Flush cache to rebuild. */
	priv->mr.cache.len = 1;
	priv->mr.cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr)
		if (mr_insert_dev_cache(dev, mr) < 0)
			return;
}

/**
 * Callback for memory free event. Iterate freed memsegs and check whether
 * each belongs to an existing MR. If so, clear the corresponding bit in the
 * MR's bitmap; as a result, the MR becomes fragmented. If an MR becomes
 * empty, it will be freed later by mlx5_mr_garbage_collect(). Even if this
 * callback is called from a secondary process, the garbage collector runs in
 * the primary process, as a secondary process can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has
 * to be propagated to dataplane threads to flush the local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_memseg_list *msl;
	struct mlx5_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu",
		dev->data->port_id, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	assert(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mlx5_mr_cache entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mr_lookup_dev_list(dev, &entry, start);
		if (mr == NULL)
			continue;
		ms = rte_mem_virt2memseg((void *)start, msl);
		assert(ms != NULL);
		assert(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		assert(rte_bitmap_get(mr->ms_bmp, pos));
		assert(pos < mr->ms_bmp_n);
		DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p",
			dev->data->port_id, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
			DRV_LOG(DEBUG, "port %u remove MR(%p) from list",
				dev->data->port_id, (void *)mr);
		}
		/*
		 * MR is fragmented or will be freed. The global cache must be
		 * rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mr_rebuild_dev_cache(dev);
		/*
		 * Flush local caches by propagating invalidation across
		 * cores. rte_smp_wmb() is enough to synchronize this event.
		 * If one of the freed memsegs is seen by another core, that
		 * means the memseg has been reallocated by the allocator,
		 * which happens after this free call. Therefore, the store
		 * below (incrementing the generation) is guaranteed to be
		 * visible to that core before it sees the newly allocated
		 * memory.
		 */
		++priv->mr.dev_gen;
		DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
			priv->mr.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
		mlx5_mr_dump_dev(dev);
}

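/*
 * A minimal sketch of the consumer side (illustrative; the actual check is
 * inlined in the datapath code): each queue compares its cached generation
 * number against the device generation updated above and flushes its local
 * caches on a mismatch:
 *
 *	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *		mlx5_mr_flush_local_cache(mr_ctrl);
 *
 * Paired with the rte_smp_wmb() above, a core that sees a recycled address
 * is guaranteed to see the incremented generation first.
 */
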
/**
 * Callback for memory event. This can be called from both primary and
 * secondary processes.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct priv *priv;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(priv, dev_list, mem_event_cb)
			mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Look up address in the global MR cache table. If not found, create a new
 * MR. Insert the found/created entry into the local bottom-half cache table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   struct mlx5_mr_cache *entry, uintptr_t addr)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint16_t idx;
	uint32_t lkey;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*priv->mr.cache.table)[idx];
		rte_rwlock_read_unlock(&priv->mr.rwlock);
		/*
		 * Update local cache. Even if it fails, return the found
		 * entry to update the top-half cache. Next time, this entry
		 * will be found in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(dev, entry, addr);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if it failed, there's no action to take in this datapath code.
	 * As the returned LKey is invalid, this will eventually make the HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on the datapath. First search in cache_bh[];
 * on a miss, search the global MR cache table and propagate the new entry to
 * the per-queue local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; local cache_bh[] will be updated inside if possible.
		 * The top-half cache entry will also be updated.
		 */
		lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}

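/*
 * The top-half counterpart (illustrative sketch; the real linear search is
 * inlined in the datapath code): the MRU entry is probed first, then the
 * small cache[] array, and only a complete miss falls through to this
 * bottom-half:
 *
 *	struct mlx5_mr_cache *c = &mr_ctrl->cache[mr_ctrl->mru];
 *
 *	if (likely(addr >= c->start && addr < c->end))
 *		return c->lkey;
 *	(linear scan of mr_ctrl->cache[0..MLX5_MR_CACHE_N - 1])
 *	return mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
 */
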
/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct priv *priv = rxq_ctrl->priv;

	DRV_LOG(DEBUG,
		"Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
		rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct priv *priv = txq_ctrl->priv;

	DRV_LOG(DEBUG,
		"Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
		txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
				  (uintptr_t)memhdr->addr);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
	return data.ret;
}

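/*
 * A minimal usage sketch (hypothetical queue-setup code, names assumed):
 * queue setup calls this at configuration time so that every chunk of the
 * mempool is registered before the first packet:
 *
 *	if (mlx5_mr_update_mp(dev, &rxq->mr_ctrl, mp) < 0)
 *		DRV_LOG(WARNING, "port %u mempool %s not registered",
 *			dev->data->port_id, mp->name);
 */
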
/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_dump_dev(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&priv->mr.rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		DRV_LOG(DEBUG,
			"port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
			dev->data->port_id, mr_n++,
			rte_cpu_to_be_32(mr->ibv_mr->lkey),
			mr->ms_n, mr->ms_bmp_n);
		if (!mr->ms_n)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx5_mr_cache ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DRV_LOG(DEBUG,
				"  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
				chunk_n++, ret.start, ret.end);
		}
	}
	DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id);
	mlx5_mr_btree_dump(&priv->mr.cache);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
}

/**
 * Release all the created MRs and resources. Remove device from the memory
 * callback list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_release(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);

	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
		mlx5_mr_dump_dev(dev);
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach from MR list and move to free list. */
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
	}
	LIST_INIT(&priv->mr.mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&priv->mr.cache);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(dev);
}