/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"

struct mr_find_contig_memsegs_data {
        uintptr_t addr;
        uintptr_t start;
        uintptr_t end;
        const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
        struct rte_eth_dev *dev;
        struct mlx5_mr_ctrl *mr_ctrl;
        int ret;
};
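
/*
 * Overview of the lookup structures used below (summary added for clarity):
 * each Rx/Tx queue owns a struct mlx5_mr_ctrl holding a small linear top-half
 * cache (mr_ctrl->cache[]) and a per-queue bottom-half B-tree
 * (mr_ctrl->cache_bh). On a miss in both, the per-device global cache
 * (priv->mr.cache) is consulted under priv->mr.rwlock and, as a last resort,
 * the original MR list (priv->mr.mr_list) is traversed or a new MR is created
 * by mlx5_mr_create().
 */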

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
        void *mem;

        /*
         * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
         * used inside if there's no room to expand. Because this is quite a
         * rare case and part of a very slow path, it is acceptable.
         * Initially cache_bh[] will be given practically enough space and,
         * once it is expanded, expansion won't be needed again.
         */
        mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
        if (mem == NULL) {
                /* Not an error, B-tree search will be skipped. */
                DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
                        (void *)bt);
                return -1;
        }
        DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
        bt->table = mem;
        bt->size = n;
        return 0;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
        struct mlx5_mr_cache *lkp_tbl;
        uint16_t n;
        uint16_t base = 0;

        lkp_tbl = *bt->table;
        n = bt->len;
        /* First entry must be NULL for comparison. */
        assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
                               lkp_tbl[0].lkey == UINT32_MAX));
        /* Binary search. */
        do {
                register uint16_t delta = n >> 1;

                if (addr < lkp_tbl[base + delta].start) {
                        n = delta;
                } else {
                        base += delta;
                        n -= delta;
                }
        } while (n > 1);
        assert(addr >= lkp_tbl[base].start);
        *idx = base;
        if (addr < lkp_tbl[base].end)
                return lkp_tbl[base].lkey;
        /* No match found. */
        return UINT32_MAX;
}
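
/*
 * Illustration of the lookup-table invariant (hypothetical addresses): with
 * the sentinel kept at index 0, a table with len == 3 could hold
 *
 *      idx  start        end          lkey
 *        0  0x0          0x0          UINT32_MAX  (sentinel)
 *        1  0x10000000   0x10200000   0x1234
 *        2  0x20000000   0x20100000   0x5678
 *
 * mr_btree_lookup(bt, &idx, 0x10000010) converges on base == 1 and returns
 * 0x1234; 0x18000000 also converges on base == 1 but fails the end-bound
 * check and returns UINT32_MAX, leaving *idx == 1 as the slot to insert
 * after.
 */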

/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
{
        struct mlx5_mr_cache *lkp_tbl;
        uint16_t idx = 0;
        size_t shift;

        assert(bt->len <= bt->size);
        assert(bt->len > 0);
        lkp_tbl = *bt->table;
        /* Find out the slot for insertion. */
        if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
                DRV_LOG(DEBUG,
                        "abort insertion to B-tree(%p): already exists at"
                        " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
                        (void *)bt, idx, entry->start, entry->end, entry->lkey);
                /* Already exists, return. */
                return 0;
        }
        /* If table is full, return error. */
        if (unlikely(bt->len == bt->size)) {
                bt->overflow = 1;
                return -1;
        }
        /* Insert entry after the searched index. */
        ++idx;
        shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
        if (shift)
                memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
        lkp_tbl[idx] = *entry;
        bt->len++;
        DRV_LOG(DEBUG,
                "inserted B-tree(%p)[%u],"
                " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
                (void *)bt, idx, entry->start, entry->end, entry->lkey);
        return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
        if (bt == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        assert(!bt->table && !bt->size);
        memset(bt, 0, sizeof(*bt));
        bt->table = rte_calloc_socket("B-tree table",
                                      n, sizeof(struct mlx5_mr_cache),
                                      0, socket);
        if (bt->table == NULL) {
                rte_errno = ENOMEM;
                DEBUG("failed to allocate memory for btree cache on socket %d",
                      socket);
                return -rte_errno;
        }
        bt->size = n;
        /* First entry must be NULL for binary search. */
        (*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
                .lkey = UINT32_MAX,
        };
        DEBUG("initialized B-tree %p with table %p",
              (void *)bt, (void *)bt->table);
        return 0;
}
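
/*
 * Typical initialization (illustrative; the actual sizes are set by
 * MLX5_MR_BTREE_CACHE_N in mlx5_mr.h and the callers in mlx5.c/mlx5_rxq.c
 * may differ):
 *
 *      mlx5_mr_btree_init(&priv->mr.cache,
 *                         MLX5_MR_BTREE_CACHE_N * 2, socket);
 *      mlx5_mr_btree_init(&rxq_ctrl->rxq.mr_ctrl.cache_bh,
 *                         MLX5_MR_BTREE_CACHE_N, socket);
 */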

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
        if (bt == NULL)
                return;
        DEBUG("freeing B-tree %p with table %p",
              (void *)bt, (void *)bt->table);
        rte_free(bt->table);
        memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifndef NDEBUG
        int idx;
        struct mlx5_mr_cache *lkp_tbl;

        lkp_tbl = *bt->table;
        for (idx = 0; idx < bt->len; ++idx) {
                struct mlx5_mr_cache *entry = &lkp_tbl[idx];

                DEBUG("B-tree(%p)[%u],"
                      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
                      (void *)bt, idx, entry->start, entry->end, entry->lkey);
        }
#endif
}

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
                   int base_idx)
{
        uintptr_t start = 0;
        uintptr_t end = 0;
        uint32_t idx = 0;

        /* MR for external memory doesn't have memseg list. */
        if (mr->msl == NULL) {
                struct ibv_mr *ibv_mr = mr->ibv_mr;

                assert(mr->ms_bmp_n == 1);
                assert(mr->ms_n == 1);
                assert(base_idx == 0);
                /*
                 * Can't search it from memseg list but get it directly from
                 * verbs MR as there's only one chunk.
                 */
                entry->start = (uintptr_t)ibv_mr->addr;
                entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
                entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
                /* Returning 1 ends iteration. */
                return 1;
        }
        for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
                if (rte_bitmap_get(mr->ms_bmp, idx)) {
                        const struct rte_memseg_list *msl;
                        const struct rte_memseg *ms;

                        msl = mr->msl;
                        ms = rte_fbarray_get(&msl->memseg_arr,
                                             mr->ms_base_idx + idx);
                        assert(msl->page_sz == ms->hugepage_sz);
                        if (!start)
                                start = ms->addr_64;
                        end = ms->addr_64 + ms->hugepage_sz;
                } else if (start) {
                        /* Passed the end of a fragment. */
                        break;
                }
        }
        if (start) {
                /* Found one chunk. */
                entry->start = start;
                entry->end = end;
                entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
        }
        return idx;
}
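
/*
 * Callers such as mr_insert_dev_cache(), mr_lookup_dev_list() and
 * mlx5_mr_dump_dev() below walk all disjoint chunks of an MR by feeding the
 * returned index back in:
 *
 *      for (n = 0; n < mr->ms_bmp_n; ) {
 *              struct mlx5_mr_cache entry;
 *
 *              memset(&entry, 0, sizeof(entry));
 *              n = mr_find_next_chunk(mr, &entry, n);
 *              if (!entry.end)
 *                      break;
 *              // ... use entry ...
 *      }
 */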

/**
 * Insert an MR to the global B-tree cache. It may fail due to low-on-memory.
 * Then, this entry will have to be searched by mr_lookup_dev_list() in
 * mlx5_mr_create() on miss.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int n;

        DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
                dev->data->port_id, (void *)mr);
        for (n = 0; n < mr->ms_bmp_n; ) {
                struct mlx5_mr_cache entry;

                memset(&entry, 0, sizeof(entry));
                /* Find a contiguous chunk and advance the index. */
                n = mr_find_next_chunk(mr, &entry, n);
                if (!entry.end)
                        break;
                if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
                        /*
                         * Overflowed, but the global table cannot be expanded
                         * because of deadlock.
                         */
                        return -1;
                }
        }
        return 0;
}
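
/*
 * Note: when mr_btree_insert() fails above, the B-tree code marks
 * priv->mr.cache.overflow and mr_lookup_dev() falls back to scanning
 * priv->mr.mr_list until mr_rebuild_dev_cache() rebuilds the table, so the
 * failure costs performance, not correctness.
 */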

/**
 * Look up address in the original global MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
static struct mlx5_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
                   uintptr_t addr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr;

        /* Iterate all the existing MRs. */
        LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
                unsigned int n;

                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
                        struct mlx5_mr_cache ret;

                        memset(&ret, 0, sizeof(ret));
                        n = mr_find_next_chunk(mr, &ret, n);
                        if (addr >= ret.start && addr < ret.end) {
                                /* Found. */
                                *entry = ret;
                                return mr;
                        }
                }
        }
        return NULL;
}

/**
 * Look up address on device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
              uintptr_t addr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint16_t idx;
        uint32_t lkey = UINT32_MAX;
        struct mlx5_mr *mr;

        /*
         * If the global cache has overflowed since it failed to expand the
         * B-tree table, it can't have all the existing MRs. Then, the address
         * has to be searched by traversing the original MR list instead, which
         * is a very slow path. Otherwise, the global cache is all inclusive.
         */
        if (!unlikely(priv->mr.cache.overflow)) {
                lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
                if (lkey != UINT32_MAX)
                        *entry = (*priv->mr.cache.table)[idx];
        } else {
                /* Falling back to the slowest path. */
                mr = mr_lookup_dev_list(dev, entry, addr);
                if (mr != NULL)
                        lkey = entry->lkey;
        }
        assert(lkey == UINT32_MAX || (addr >= entry->start &&
                                      addr < entry->end));
        return lkey;
}

/**
 * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free()
 * can raise a memory free event and the callback function will spin on the
 * lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx5_mr *mr)
{
        if (mr == NULL)
                return;
        DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
        if (mr->ibv_mr != NULL)
                claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
        if (mr->ms_bmp != NULL)
                rte_bitmap_free(mr->ms_bmp);
        rte_free(mr);
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr_next;
        struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

        /* Must be called from the primary process. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /*
         * MR can't be freed while holding the lock because rte_free() could
         * call the memory free callback function. This would be a deadlock
         * situation.
         */
        rte_rwlock_write_lock(&priv->mr.rwlock);
        /* Detach the whole free list and release it after unlocking. */
        free_list = priv->mr.mr_free_list;
        LIST_INIT(&priv->mr.mr_free_list);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
        /* Release resources. */
        mr_next = LIST_FIRST(&free_list);
        while (mr_next != NULL) {
                struct mlx5_mr *mr = mr_next;

                mr_next = LIST_NEXT(mr, mr);
                mr_free(mr);
        }
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
                          const struct rte_memseg *ms, size_t len, void *arg)
{
        struct mr_find_contig_memsegs_data *data = arg;

        if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
                return 0;
        /* Found, save it and stop walking. */
        data->start = ms->addr_64;
        data->end = ms->addr_64 + len;
        data->msl = msl;
        return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
               uintptr_t addr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        const struct rte_memseg_list *msl;
        const struct rte_memseg *ms;
        struct mlx5_mr *mr = NULL;
        size_t len;
        uint32_t ms_n;
        uint32_t n;
        uint32_t bmp_size;
        void *bmp_mem;
        int ms_idx_shift = -1;
        struct mr_find_contig_memsegs_data data = {
                .addr = addr,
        };
        struct mr_find_contig_memsegs_data data_re;

        DRV_LOG(DEBUG, "port %u creating an MR using address (%p)",
                dev->data->port_id, (void *)addr);
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DRV_LOG(WARNING,
                        "port %u using address (%p) of unregistered mempool"
                        " in secondary process, please create mempool"
                        " before rte_eth_dev_start()",
                        dev->data->port_id, (void *)addr);
                rte_errno = EPERM;
                goto err_nolock;
        }
        /*
         * Release detached MRs if any. This can't be called while holding
         * either memory_hotplug_lock or priv->mr.rwlock. MRs on the free list
         * have been detached by the memory free event but couldn't be released
         * inside the callback due to deadlock. As a result, releasing
         * resources is quite opportunistic.
         */
        mlx5_mr_garbage_collect(dev);
        /*
         * If enabled, find out a contiguous virtual address chunk in use, to
         * which the given address belongs, in order to register maximum range.
         * In the best case where mempools are not dynamically recreated and
         * '--socket-mem' is specified as an EAL option, it is very likely to
         * have only one MR(LKey) per socket and per hugepage size even though
         * the system memory is highly fragmented. As the whole memory chunk
         * will be pinned by the kernel, it can't be reused unless the entire
         * chunk is freed from EAL.
         *
         * If disabled, just register one memseg (page). Then, memory
         * consumption will be minimized but it may drop performance if there
         * are many MRs to look up on the datapath.
         */
        if (!config->mr_ext_memseg_en) {
                data.msl = rte_mem_virt2memseg_list((void *)addr);
                data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
                data.end = data.start + data.msl->page_sz;
        } else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
                DRV_LOG(WARNING,
                        "port %u unable to find virtually contiguous"
                        " chunk for address (%p)."
                        " rte_memseg_contig_walk() failed.",
                        dev->data->port_id, (void *)addr);
                rte_errno = ENXIO;
                goto err_nolock;
        }
alloc_resources:
        /* Addresses must be page-aligned. */
        assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
        assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
        msl = data.msl;
        ms = rte_mem_virt2memseg((void *)data.start, msl);
        len = data.end - data.start;
        assert(msl->page_sz == ms->hugepage_sz);
        /* Number of memsegs in the range. */
        ms_n = len / msl->page_sz;
        DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
              " page_sz=0x%" PRIx64 ", ms_n=%u",
              dev->data->port_id, (void *)addr,
              data.start, data.end, msl->page_sz, ms_n);
        /* Size of memory for bitmap. */
        bmp_size = rte_bitmap_get_memory_footprint(ms_n);
        mr = rte_zmalloc_socket(NULL,
                                RTE_ALIGN_CEIL(sizeof(*mr),
                                               RTE_CACHE_LINE_SIZE) +
                                bmp_size,
                                RTE_CACHE_LINE_SIZE, msl->socket_id);
        if (mr == NULL) {
                DEBUG("port %u unable to allocate memory for a new MR of"
                      " address (%p).",
                      dev->data->port_id, (void *)addr);
                rte_errno = ENOMEM;
                goto err_nolock;
        }
        mr->msl = msl;
        /*
         * Save the index of the first memseg and initialize memseg bitmap. To
         * see if a memseg of ms_idx in the memseg-list is still valid, check:
         *      rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
         */
        mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
        bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
        mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
        if (mr->ms_bmp == NULL) {
                DEBUG("port %u unable to initialize bitmap for a new MR of"
                      " address (%p).",
                      dev->data->port_id, (void *)addr);
                rte_errno = EINVAL;
                goto err_nolock;
        }
        /*
         * Should recheck whether the extended contiguous chunk is still valid.
         * Because memory_hotplug_lock can't be held if there are any memory
         * related calls in a critical path, resource allocation above can't be
         * locked. If the memory has been changed at this point, try again with
         * just a single page. If not, go on with the big chunk atomically from
         * here.
         */
        rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
        data_re = data;
        if (len > msl->page_sz &&
            !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
                DEBUG("port %u unable to find virtually contiguous"
                      " chunk for address (%p)."
                      " rte_memseg_contig_walk() failed.",
                      dev->data->port_id, (void *)addr);
                rte_errno = ENXIO;
                goto err_memlock;
        }
        if (data.start != data_re.start || data.end != data_re.end) {
                /*
                 * The extended contiguous chunk has been changed. Try again
                 * with single memseg instead.
                 */
                data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
                data.end = data.start + msl->page_sz;
                rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
                mr_free(mr);
                goto alloc_resources;
        }
        assert(data.msl == data_re.msl);
        rte_rwlock_write_lock(&priv->mr.rwlock);
        /*
         * Check the address is really missing. If another thread already
         * created one or it is not found due to overflow, abort and return.
         */
        if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
                /*
                 * Insert to the global cache table. It may fail due to
                 * low-on-memory. Then, this entry will have to be searched
                 * here again.
                 */
                mr_btree_insert(&priv->mr.cache, entry);
                DEBUG("port %u found MR for %p on final lookup, abort",
                      dev->data->port_id, (void *)addr);
                rte_rwlock_write_unlock(&priv->mr.rwlock);
                rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
                /*
                 * Must be unlocked before calling rte_free() because
                 * mlx5_mr_mem_event_free_cb() can be called inside.
                 */
                mr_free(mr);
                return entry->lkey;
        }
        /*
         * Trim start and end addresses for verbs MR. Set bits for registering
         * memsegs but exclude already registered ones. Bitmap can be
         * fragmented.
         */
        for (n = 0; n < ms_n; ++n) {
                uintptr_t start;
                struct mlx5_mr_cache ret;

                memset(&ret, 0, sizeof(ret));
                start = data_re.start + n * msl->page_sz;
                /* Exclude memsegs already registered by other MRs. */
                if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
                        /*
                         * Start from the first unregistered memseg in the
                         * extended range.
                         */
                        if (ms_idx_shift == -1) {
                                mr->ms_base_idx += n;
                                data.start = start;
                                ms_idx_shift = n;
                        }
                        data.end = start + msl->page_sz;
                        rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
                        ++mr->ms_n;
                }
        }
        len = data.end - data.start;
        mr->ms_bmp_n = len / msl->page_sz;
        assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
        /*
         * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
         * called while holding the memory lock because it doesn't use
         * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
         * through mlx5_alloc_verbs_buf().
         */
        mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
                                       IBV_ACCESS_LOCAL_WRITE);
        if (mr->ibv_mr == NULL) {
                DEBUG("port %u failed to create a verbs MR for address (%p)",
                      dev->data->port_id, (void *)addr);
                rte_errno = EINVAL;
                goto err_mrlock;
        }
        assert((uintptr_t)mr->ibv_mr->addr == data.start);
        assert(mr->ibv_mr->length == len);
        LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
        DEBUG("port %u MR CREATED (%p) for %p:\n"
              " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
              " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
              dev->data->port_id, (void *)mr, (void *)addr,
              data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
              mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
        /* Insert to the global cache table. */
        mr_insert_dev_cache(dev, mr);
        /* Fill in output data. */
        mr_lookup_dev(dev, entry, addr);
        /* Lookup can't fail. */
        assert(entry->lkey != UINT32_MAX);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
        rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
        return entry->lkey;
err_mrlock:
        rte_rwlock_write_unlock(&priv->mr.rwlock);
err_memlock:
        rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
err_nolock:
        /*
         * In case of error, as this can be called on a datapath, a warning
         * message per error is preferable instead. Must be unlocked before
         * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
         * inside.
         */
        mr_free(mr);
        return UINT32_MAX;
}
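
/*
 * Locking order in mlx5_mr_create(), summarized from the code above:
 * mcfg->memory_hotplug_lock (read) is taken first, then priv->mr.rwlock
 * (write); both must be dropped before mr_free() since rte_free() may
 * re-enter mlx5_mr_mem_event_free_cb(), which takes priv->mr.rwlock itself.
 */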

/**
 * Rebuild the global B-tree cache of device from the original MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr;

        DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
        /* Flush cache to rebuild. */
        priv->mr.cache.len = 1;
        priv->mr.cache.overflow = 0;
        /* Iterate all the existing MRs. */
        LIST_FOREACH(mr, &priv->mr.mr_list, mr)
                if (mr_insert_dev_cache(dev, mr) < 0)
                        return;
}

/**
 * Callback for memory free event. Iterate freed memsegs and check whether each
 * belongs to an existing MR. If found, clear the bit from the bitmap of the
 * MR. As a result, the MR would be fragmented. If it becomes empty, the MR
 * will be freed later by mlx5_mr_garbage_collect(). Even if this callback is
 * called from a secondary process, the garbage collector will be called in
 * the primary process as the secondary process can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has to
 * be propagated to dataplane threads to flush the local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_memseg_list *msl;
        struct mlx5_mr *mr;
        int ms_n;
        int i;
        int rebuild = 0;

        DEBUG("port %u free callback: addr=%p, len=%zu",
              dev->data->port_id, addr, len);
        msl = rte_mem_virt2memseg_list(addr);
        /* addr and len must be page-aligned. */
        assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
        assert(len == RTE_ALIGN(len, msl->page_sz));
        ms_n = len / msl->page_sz;
        rte_rwlock_write_lock(&priv->mr.rwlock);
        /* Clear bits of freed memsegs from MR. */
        for (i = 0; i < ms_n; ++i) {
                const struct rte_memseg *ms;
                struct mlx5_mr_cache entry;
                uintptr_t start;
                int ms_idx;
                uint32_t pos;

                /* Find MR having this memseg. */
                start = (uintptr_t)addr + i * msl->page_sz;
                mr = mr_lookup_dev_list(dev, &entry, start);
                if (mr == NULL)
                        continue;
                assert(mr->msl); /* Can't be external memory. */
                ms = rte_mem_virt2memseg((void *)start, msl);
                assert(ms != NULL);
                assert(msl->page_sz == ms->hugepage_sz);
                ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
                pos = ms_idx - mr->ms_base_idx;
                assert(rte_bitmap_get(mr->ms_bmp, pos));
                assert(pos < mr->ms_bmp_n);
                DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
                      dev->data->port_id, (void *)mr, pos, (void *)start);
                rte_bitmap_clear(mr->ms_bmp, pos);
                if (--mr->ms_n == 0) {
                        LIST_REMOVE(mr, mr);
                        LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
                        DEBUG("port %u remove MR(%p) from list",
                              dev->data->port_id, (void *)mr);
                }
                /*
                 * MR is fragmented or will be freed. The global cache must be
                 * rebuilt.
                 */
                rebuild = 1;
        }
        if (rebuild) {
                mr_rebuild_dev_cache(dev);
                /*
                 * Flush local caches by propagating invalidation across cores.
                 * rte_smp_wmb() is enough to synchronize this event. If one of
                 * the freed memsegs is seen by another core, that means the
                 * memseg has been allocated by the allocator, which will come
                 * after this free call. Therefore, this store instruction
                 * (incrementing the generation below) will be guaranteed to be
                 * seen by the other core before it sees the newly allocated
                 * memory.
                 */
                ++priv->mr.dev_gen;
                DEBUG("broadcasting local cache flush, gen=%d",
                      priv->mr.dev_gen);
                rte_smp_wmb();
        }
        rte_rwlock_write_unlock(&priv->mr.rwlock);
}

/**
 * Callback for memory event. This can be called from both primary and
 * secondary process.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
                     size_t len, void *arg __rte_unused)
{
        struct mlx5_priv *priv;
        struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

        /* Must be called from the primary process. */
        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        switch (event_type) {
        case RTE_MEM_EVENT_FREE:
                rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
                /* Iterate all the existing mlx5 devices. */
                LIST_FOREACH(priv, dev_list, mem_event_cb)
                        mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
                rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
                break;
        case RTE_MEM_EVENT_ALLOC:
        default:
                break;
        }
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
                   struct mlx5_mr_cache *entry, uintptr_t addr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
        uint16_t idx;
        uint32_t lkey;

        /* If local cache table is full, try to double it. */
        if (unlikely(bt->len == bt->size))
                mr_btree_expand(bt, bt->size << 1);
        /* Look up in the global cache. */
        rte_rwlock_read_lock(&priv->mr.rwlock);
        lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
        if (lkey != UINT32_MAX) {
                /* Found. */
                *entry = (*priv->mr.cache.table)[idx];
                rte_rwlock_read_unlock(&priv->mr.rwlock);
                /*
                 * Update local cache. Even if it fails, return the found entry
                 * to update top-half cache. Next time, this entry will be found
                 * in the global cache.
                 */
                mr_btree_insert(bt, entry);
                return lkey;
        }
        rte_rwlock_read_unlock(&priv->mr.rwlock);
        /* First time to see the address? Create a new MR. */
        lkey = mlx5_mr_create(dev, entry, addr);
        /*
         * Update the local cache if successfully created a new global MR. Even
         * if failed to create one, there's no action to take in this datapath
         * code. As the returned LKey is invalid, this will eventually make HW
         * fail.
         */
        if (lkey != UINT32_MAX)
                mr_btree_insert(bt, entry);
        return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
                   uintptr_t addr)
{
        uint32_t lkey;
        uint16_t bh_idx = 0;
        /* Victim in top-half cache to replace with new entry. */
        struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];

        /* Binary-search MR translation table. */
        lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
        /* Update top-half cache. */
        if (likely(lkey != UINT32_MAX)) {
                *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
        } else {
                /*
                 * If missed in local lookup table, search in the global cache
                 * and local cache_bh[] will be updated inside if possible.
                 * Top-half cache entry will also be updated.
                 */
                lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
                if (unlikely(lkey == UINT32_MAX))
                        return UINT32_MAX;
        }
        /* Update the most recently used entry. */
        mr_ctrl->mru = mr_ctrl->head;
        /* Point to the next victim, the oldest. */
        mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
        return lkey;
}
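
/*
 * Example of the top-half replacement above, assuming MLX5_MR_CACHE_N == 8
 * (its value in mlx5_mr.h at the time of writing): head cycles through slots
 * 0, 1, ..., 7, 0, ... so the oldest entry is always the next victim, while
 * mru remembers the last hit so the datapath linear search can test that
 * slot first.
 */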

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        struct mlx5_priv *priv = rxq_ctrl->priv;

        return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq, struct mlx5_txq_ctrl, txq);
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
        struct mlx5_priv *priv = txq_ctrl->priv;

        return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
        uintptr_t addr = (uintptr_t)mb->buf_addr;
        uint32_t lkey;

        lkey = mlx5_tx_addr2mr_bh(txq, addr);
        if (lkey == UINT32_MAX && rte_errno == ENXIO) {
                /* Mempool may have externally allocated memory. */
                return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
        }
        return lkey;
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
        /* Reset the most-recently-used index. */
        mr_ctrl->mru = 0;
        /* Reset the linear search array. */
        mr_ctrl->head = 0;
        memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
        /* Reset the B-tree table. */
        mr_ctrl->cache_bh.len = 1;
        mr_ctrl->cache_bh.overflow = 0;
        /* Update the generation number. */
        mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
        DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
                (void *)mr_ctrl, mr_ctrl->cur_gen);
}
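
/*
 * Reader side of the generation handshake (sketch; the actual check lives in
 * the datapath inlines of mlx5_rxtx.h): writers increment priv->mr.dev_gen
 * and issue rte_smp_wmb(), and a datapath thread detecting the change must
 * flush its local caches before the next lookup, along the lines of:
 *
 *      if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *              mlx5_mr_flush_local_cache(mr_ctrl);
 */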

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param dev
 *   Pointer to the ethernet device.
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
static struct mlx5_mr *
mlx5_create_mr_ext(struct rte_eth_dev *dev, uintptr_t addr, size_t len,
                   int socket_id)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr = NULL;

        mr = rte_zmalloc_socket(NULL,
                                RTE_ALIGN_CEIL(sizeof(*mr),
                                               RTE_CACHE_LINE_SIZE),
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (mr == NULL)
                return NULL;
        mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
                                       IBV_ACCESS_LOCAL_WRITE);
        if (mr->ibv_mr == NULL) {
                DRV_LOG(WARNING,
                        "port %u failed to create a verbs MR for address (%p)",
                        dev->data->port_id, (void *)addr);
                rte_free(mr);
                return NULL;
        }
        mr->msl = NULL; /* Mark it as external memory. */
        mr->ms_bmp = NULL;
        mr->ms_n = 1;
        mr->ms_bmp_n = 1;
        DRV_LOG(DEBUG,
                "port %u MR CREATED (%p) for external memory %p:\n"
                " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
                " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
                dev->data->port_id, (void *)mr, (void *)addr,
                addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
                mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
        return mr;
}

/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * Externally allocated chunk is registered and an MR is created for the chunk.
 * The MR object is added to the global list. If the memseg list of an MR
 * object (mr->msl) is null, the MR object can be regarded as externally
 * allocated memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range has different physical memory mapped
 * again, it may cause a crash on the device due to the wrong translation
 * entry. The PMD can't track the free event of the external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
                         struct rte_mempool_memhdr *memhdr,
                         unsigned mem_idx __rte_unused)
{
        struct mr_update_mp_data *data = opaque;
        struct rte_eth_dev *dev = data->dev;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
        struct mlx5_mr *mr = NULL;
        uintptr_t addr = (uintptr_t)memhdr->addr;
        size_t len = memhdr->len;
        struct mlx5_mr_cache entry;
        uint32_t lkey;

        assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* If already registered, it should return. */
        rte_rwlock_read_lock(&priv->mr.rwlock);
        lkey = mr_lookup_dev(dev, &entry, addr);
        rte_rwlock_read_unlock(&priv->mr.rwlock);
        if (lkey != UINT32_MAX)
                return;
        DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
                dev->data->port_id, mem_idx, mp->name);
        mr = mlx5_create_mr_ext(dev, addr, len, mp->socket_id);
        if (mr == NULL) {
                DRV_LOG(WARNING,
                        "port %u unable to allocate a new MR of"
                        " mempool (%s).",
                        dev->data->port_id, mp->name);
                data->ret = -1;
                return;
        }
        rte_rwlock_write_lock(&priv->mr.rwlock);
        LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
        /* Insert to the global cache table. */
        mr_insert_dev_cache(dev, mr);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
        /* Insert to the local cache table. */
        mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
}

/**
 * Find the first ethdev that matches the PCI device.
 * Multiple ethdevs per PCI device exist only with representors. In that case,
 * it is enough to get only one of the ports as they all share the same ibv
 * context.
 *
 * @param pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
pci_dev_to_eth_dev(struct rte_pci_device *pdev)
{
        struct rte_dev_iterator it;
        struct rte_device *dev;

        /*
         * We really need to iterate all devices regardless of
         * their owner.
         */
        RTE_DEV_FOREACH(dev, "class=eth", &it)
                if (dev == &pdev->device)
                        return it.class_device;
        return NULL;
}

/**
 * DPDK callback to DMA map external memory to a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
             uint64_t iova __rte_unused, size_t len)
{
        struct rte_eth_dev *dev;
        struct mlx5_mr *mr;
        struct mlx5_priv *priv;

        dev = pci_dev_to_eth_dev(pdev);
        if (!dev) {
                DRV_LOG(WARNING, "unable to find matching ethdev "
                                 "to PCI device %p", (void *)pdev);
                rte_errno = ENODEV;
                return -1;
        }
        priv = dev->data->dev_private;
        mr = mlx5_create_mr_ext(dev, (uintptr_t)addr, len, SOCKET_ID_ANY);
        if (!mr) {
                DRV_LOG(WARNING,
                        "port %u unable to dma map", dev->data->port_id);
                rte_errno = EINVAL;
                return -1;
        }
        rte_rwlock_write_lock(&priv->mr.rwlock);
        LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
        /* Insert to the global cache table. */
        mr_insert_dev_cache(dev, mr);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
        return 0;
}

/**
 * DPDK callback to DMA unmap external memory from a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
               uint64_t iova __rte_unused, size_t len __rte_unused)
{
        struct rte_eth_dev *dev;
        struct mlx5_priv *priv;
        struct mlx5_mr *mr;
        struct mlx5_mr_cache entry;

        dev = pci_dev_to_eth_dev(pdev);
        if (!dev) {
                DRV_LOG(WARNING, "unable to find matching ethdev "
                                 "to PCI device %p", (void *)pdev);
                rte_errno = ENODEV;
                return -1;
        }
        priv = dev->data->dev_private;
        rte_rwlock_read_lock(&priv->mr.rwlock);
        mr = mr_lookup_dev_list(dev, &entry, (uintptr_t)addr);
        if (!mr) {
                rte_rwlock_read_unlock(&priv->mr.rwlock);
                DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
                                 "to PCI device %p", (uintptr_t)addr,
                                 (void *)pdev);
                rte_errno = EINVAL;
                return -1;
        }
        LIST_REMOVE(mr, mr);
        LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
        DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
              (void *)mr);
        mr_rebuild_dev_cache(dev);
        /*
         * Flush local caches by propagating invalidation across cores.
         * rte_smp_wmb() is enough to synchronize this event. If one of
         * the freed memsegs is seen by another core, that means the memseg
         * has been allocated by the allocator, which will come after this
         * free call. Therefore, this store instruction (incrementing the
         * generation below) will be guaranteed to be seen by the other core
         * before it sees the newly allocated memory.
         */
        ++priv->mr.dev_gen;
        DEBUG("broadcasting local cache flush, gen=%d",
              priv->mr.dev_gen);
        rte_smp_wmb();
        rte_rwlock_read_unlock(&priv->mr.rwlock);
        return 0;
}
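
/*
 * Note: mlx5_dma_unmap() only moves the MR to priv->mr.mr_free_list; actual
 * deregistration is deferred to mlx5_mr_garbage_collect(), which runs on the
 * next mlx5_mr_create() or mlx5_mr_release(), for the same deadlock reason
 * documented above mlx5_mr_garbage_collect().
 */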

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and fill in local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
                      struct rte_mempool *mp)
{
        struct mr_update_mp_data data = {
                .dev = dev,
                .mr_ctrl = mr_ctrl,
                .ret = 0,
        };

        rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
        return data.ret;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and search LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
                      struct rte_mempool *mp)
{
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq, struct mlx5_txq_ctrl, txq);
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
        struct mlx5_priv *priv = txq_ctrl->priv;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DRV_LOG(WARNING,
                        "port %u using address (%p) from unregistered mempool"
                        " having externally allocated memory"
                        " in secondary process, please create mempool"
                        " prior to rte_eth_dev_start()",
                        PORT_ID(priv), (void *)addr);
                return UINT32_MAX;
        }
        mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
        return mlx5_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
                     struct rte_mempool_memhdr *memhdr,
                     unsigned mem_idx __rte_unused)
{
        struct mr_update_mp_data *data = opaque;
        uint32_t lkey;

        /* Stop iteration if failed in the previous walk. */
        if (data->ret < 0)
                return;
        /* Register address of the chunk and update local caches. */
        lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
                                  (uintptr_t)memhdr->addr);
        if (lkey == UINT32_MAX)
                data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
                  struct rte_mempool *mp)
{
        struct mr_update_mp_data data = {
                .dev = dev,
                .mr_ctrl = mr_ctrl,
                .ret = 0,
        };

        rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
        if (data.ret < 0 && rte_errno == ENXIO) {
                /* Mempool may have externally allocated memory. */
                return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
        }
        return data.ret;
}
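
/*
 * mlx5_mr_update_mp() is typically invoked when a queue is set up (for
 * instance from Rx queue creation in mlx5_rxq.c; an assumption based on the
 * exported API, not shown here) so that every chunk of the mempool is
 * registered before the first packet arrives.
 */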

/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
{
#ifndef NDEBUG
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr;
        int mr_n = 0;
        int chunk_n = 0;

        rte_rwlock_read_lock(&priv->mr.rwlock);
        /* Iterate all the existing MRs. */
        LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
                unsigned int n;

                DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
                      dev->data->port_id, mr_n++,
                      rte_cpu_to_be_32(mr->ibv_mr->lkey),
                      mr->ms_n, mr->ms_bmp_n);
                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
                        struct mlx5_mr_cache ret = { 0, };

                        n = mr_find_next_chunk(mr, &ret, n);
                        if (!ret.end)
                                break;
                        DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
                              chunk_n++, ret.start, ret.end);
                }
        }
        DEBUG("port %u dumping global cache", dev->data->port_id);
        mlx5_mr_btree_dump(&priv->mr.cache);
        rte_rwlock_read_unlock(&priv->mr.rwlock);
#endif
}

/**
 * Release all the created MRs and resources. Remove device from memory callback
 * list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_release(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);

        /* Remove from memory callback device list. */
        rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
        LIST_REMOVE(priv, mem_event_cb);
        rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
                mlx5_mr_dump_dev(dev);
        rte_rwlock_write_lock(&priv->mr.rwlock);
        /* Detach from MR list and move to free list. */
        while (mr_next != NULL) {
                struct mlx5_mr *mr = mr_next;

                mr_next = LIST_NEXT(mr, mr);
                LIST_REMOVE(mr, mr);
                LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
        }
        LIST_INIT(&priv->mr.mr_list);
        /* Free global cache. */
        mlx5_mr_btree_free(&priv->mr.cache);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
        /* Free all remaining MRs. */
        mlx5_mr_garbage_collect(dev);
}