/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_rxtx.h"
#include "mlx5_glue.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx5_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a quite
	 * rare case and part of a very slow path, it is acceptable.
	 * Initially, cache_bh[] is given practically enough space, and once
	 * it has been expanded, further expansion should rarely be needed.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up the LKey from a given B-tree lookup table, store the last index,
 * and return the searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param idx
 *   Pointer to index. Even on search failure, this returns the index where
 *   the search stopped so that it can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mlx5_mr_cache *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	assert(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
			       lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	assert(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* No match found. */
	return UINT32_MAX;
}

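/*
 * Illustrative sketch (not part of the driver): how the lookup above and the
 * insertion below compose. mr_btree_lookup() returns the LKey on a hit; on a
 * miss it returns UINT32_MAX and stores in *idx the slot where the search
 * stopped, which is what keeps sorted insertion cheap. The address range and
 * LKey below are hypothetical placeholders.
 */
static void __rte_unused
mr_btree_usage_sketch(struct mlx5_mr_btree *bt)
{
	uintptr_t addr = (uintptr_t)0x7f0000000000ULL; /* Hypothetical. */
	uint16_t idx = 0;
	uint32_t lkey = mr_btree_lookup(bt, &idx, addr);

	if (lkey == UINT32_MAX) {
		/* Miss: insert a (hypothetical) entry covering one page. */
		struct mlx5_mr_cache entry = {
			.start = addr,
			.end = addr + 4096,
			.lkey = rte_cpu_to_be_32(0x1234),
		};

		(void)mr_btree_insert(bt, &entry);
	}
}
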
/**
 * Insert an entry into a B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
{
	struct mlx5_mr_cache *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	assert(bt != NULL);
	assert(bt->len <= bt->size);
	assert(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end,
			entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	assert(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mlx5_mr_cache),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DEBUG("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}

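/*
 * Illustrative sketch (not part of the driver): typical lifecycle of a
 * bottom-half B-tree such as a per-queue cache_bh[]. MLX5_MR_BTREE_CACHE_N
 * is the per-queue table size used elsewhere in the PMD; the NUMA socket
 * choice below is a hypothetical placeholder.
 */
static int __rte_unused
mr_btree_lifecycle_sketch(void)
{
	struct mlx5_mr_btree bt = { 0 };

	if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N,
			       SOCKET_ID_ANY) < 0)
		return -rte_errno; /* rte_errno was set by init. */
	/* ... lookups and insertions would happen here ... */
	mlx5_mr_btree_free(&bt);
	return 0;
}
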
/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifndef NDEBUG
	int idx;
	struct mlx5_mr_cache *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mlx5_mr_cache *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		struct ibv_mr *ibv_mr = mr->ibv_mr;

		assert(mr->ms_bmp_n == 1);
		assert(mr->ms_n == 1);
		assert(base_idx == 0);
		/*
		 * Can't search it from the memseg list but get it directly
		 * from the verbs MR as there's only one chunk.
		 */
		entry->start = (uintptr_t)ibv_mr->addr;
		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			assert(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
	}
	return idx;
}

/**
 * Insert an MR into the global B-tree cache. It may fail when running low on
 * memory. Then, this entry will have to be searched by mr_lookup_dev_list()
 * in mlx5_mr_create() on miss.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int n;

	DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
		dev->data->port_id, (void *)mr);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mlx5_mr_cache entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * here because of a possible deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
static struct mlx5_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
		   uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx5_mr_cache ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
	      uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(priv->mr.cache.overflow)) {
		lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*priv->mr.cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mr_lookup_dev_list(dev, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	assert(lkey == UINT32_MAX || (addr >= entry->start &&
				      addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function will
 * spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx5_mr *mr)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	if (mr->ibv_mr != NULL)
		claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MRs can't be freed while holding the lock because rte_free() could
	 * call the memory free callback function, which would deadlock.
	 */
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = priv->mr.mr_free_list;
	LIST_INIT(&priv->mr.mr_free_list);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

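/*
 * Illustrative sketch (not part of the driver): how the callback above is
 * driven. rte_memseg_contig_walk() returns the callback's nonzero result
 * when the walk is cut short, so a return value of 1 here means the chunk
 * containing addr was found. The helper name is hypothetical.
 */
static int __rte_unused
mr_find_contig_sketch(uintptr_t addr, uintptr_t *start, uintptr_t *end)
{
	struct mr_find_contig_memsegs_data data = {
		.addr = addr,
	};

	if (rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data) != 1)
		return -1; /* Not found. */
	*start = data.start;
	*end = data.end;
	return 0;
}
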
/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
	       uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	size_t len;
	uint32_t ms_n;
	uint32_t bmp_size;
	void *bmp_mem;
	int ms_idx_shift = -1;
	uint32_t n;
	struct mr_find_contig_memsegs_data data = {
		.addr = addr,
	};
	struct mr_find_contig_memsegs_data data_re;

	DRV_LOG(DEBUG, "port %u creating a MR using address (%p)",
		dev->data->port_id, (void *)addr);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) of unregistered mempool"
			" in secondary process, please create mempool"
			" before rte_eth_dev_start()",
			dev->data->port_id, (void *)addr);
		rte_errno = EPERM;
		goto err_nolock;
	}
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or priv->mr.rwlock. MRs on the free list
	 * have been detached by the memory free event but couldn't be released
	 * inside the callback due to the deadlock risk. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(dev);
	/*
	 * Find out a contiguous virtual address chunk in use, to which the
	 * given address belongs, in order to register the maximum range. In
	 * the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR (LKey) per socket and per hugepage size, even
	 * though the system memory is highly fragmented.
	 */
	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"port %u unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.",
			dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
	assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	assert(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      dev->data->port_id, (void *)addr,
	      data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DEBUG("port %u unable to allocate memory for a new MR of"
		      " address (%p).",
		      dev->data->port_id, (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize the memseg bitmap.
	 * To see if a memseg of ms_idx in the memseg-list is still valid,
	 * check: rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DEBUG("port %u unable to initialize bitmap for a new MR of"
		      " address (%p).",
		      dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DEBUG("port %u unable to find virtually contiguous"
		      " chunk for address (%p)."
		      " rte_memseg_contig_walk() failed.",
		      dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		mr_free(mr);
		goto alloc_resources;
	}
	assert(data.msl == data_re.msl);
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/*
	 * Check that the address is really missing. If another thread has
	 * already created one or it is not found due to overflow, abort and
	 * return.
	 */
	if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail when running
		 * low on memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&priv->mr.cache, entry);
		DEBUG("port %u found MR for %p on final lookup, abort",
		      dev->data->port_id, (void *)addr);
		rte_rwlock_write_unlock(&priv->mr.rwlock);
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mlx5_mr_cache ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern(), which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		DEBUG("port %u failed to create a verbs MR for address (%p)",
		      dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	assert((uintptr_t)mr->ibv_mr->addr == data.start);
	assert(mr->ibv_mr->length == len);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	DEBUG("port %u MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      dev->data->port_id, (void *)mr, (void *)addr,
	      data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	/* Fill in output data. */
	mr_lookup_dev(dev, entry, addr);
	/* Lookup can't fail. */
	assert(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&priv->mr.rwlock);
err_memlock:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}

/**
 * Rebuild the global B-tree cache of the device from the original MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
	/* Flush cache to rebuild. */
	priv->mr.cache.len = 1;
	priv->mr.cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr)
		if (mr_insert_dev_cache(dev, mr) < 0)
			return;
}

/**
 * Callback for memory free event. Iterate freed memsegs and check whether they
 * belong to an existing MR. If found, clear the bit from the bitmap of the MR.
 * As a result, the MR becomes fragmented. If it becomes empty, the MR will be
 * freed later by mlx5_mr_garbage_collect(). Even if this callback is called
 * from a secondary process, the garbage collector will be called in the
 * primary process as the secondary process can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has to
 * be propagated to dataplane threads to flush the local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr,
			  size_t len)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_memseg_list *msl;
	struct mlx5_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DEBUG("port %u free callback: addr=%p, len=%zu",
	      dev->data->port_id, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	assert(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mlx5_mr_cache entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mr_lookup_dev_list(dev, &entry, start);
		if (mr == NULL)
			continue;
		assert(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		assert(ms != NULL);
		assert(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		assert(rte_bitmap_get(mr->ms_bmp, pos));
		assert(pos < mr->ms_bmp_n);
		DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
		      dev->data->port_id, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
			DEBUG("port %u remove MR(%p) from list",
			      dev->data->port_id, (void *)mr);
		}
		/*
		 * The MR is fragmented or will be freed. The global cache
		 * must be rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mr_rebuild_dev_cache(dev);
		/*
		 * Flush local caches by propagating invalidation across cores.
		 * rte_smp_wmb() is enough to synchronize this event. If one of
		 * the freed memsegs is seen by another core, that means the
		 * memseg has been re-allocated by the allocator, which only
		 * happens after this free call. Therefore, this store
		 * instruction (incrementing the generation below) is
		 * guaranteed to be visible to another core before that core
		 * sees the newly allocated memory.
		 */
		++priv->mr.dev_gen;
		DEBUG("broadcasting local cache flush, gen=%d",
		      priv->mr.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&priv->mr.rwlock);
}

/**
 * Callback for memory event. This can be called from both primary and
 * secondary process.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_priv *priv;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(priv, dev_list, mem_event_cb)
			mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

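/*
 * Illustrative sketch (not part of the driver): the callback above only takes
 * effect once registered with the EAL. In the PMD this registration happens
 * outside this file (at probe time); the snippet only shows the shape of the
 * call. The callback name string below is hypothetical.
 */
static void __rte_unused
mr_mem_event_register_sketch(void)
{
	/* Primary process only; returns < 0, e.g., when hotplug is off. */
	(void)rte_mem_event_callback_register("MLX5_MEM_EVENT_CB_SKETCH",
					      mlx5_mr_mem_event_cb, NULL);
}
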
/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry into the local bottom-half cache table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   struct mlx5_mr_cache *entry, uintptr_t addr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint16_t idx;
	uint32_t lkey;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*priv->mr.cache.table)[idx];
		rte_rwlock_read_unlock(&priv->mr.rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update the top-half cache. Next time, this entry will be
		 * found in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(dev, entry, addr);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if creation failed, there's no action to take in this datapath
	 * code. As the returned LKey is invalid, this will eventually make the
	 * HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and, if
 * that misses, search in the global MR cache table and add the new entry to
 * the per-queue local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; local cache_bh[] will be updated inside if possible.
		 * The top-half cache entry will also be updated.
		 */
		lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}

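/*
 * Illustrative sketch (not part of the driver): the top-half that precedes
 * the bottom-half above. The real inline helpers live in the datapath header;
 * this hypothetical version only shows the intent - probe the MRU entry
 * first, then linearly scan the small per-queue array, and only then fall
 * back to mlx5_mr_addr2mr_bh().
 */
static uint32_t __rte_unused
mr_top_half_sketch(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		   uintptr_t addr)
{
	uint16_t i;

	/* The MRU entry is the most likely hit. */
	if (addr >= mr_ctrl->cache[mr_ctrl->mru].start &&
	    addr < mr_ctrl->cache[mr_ctrl->mru].end)
		return mr_ctrl->cache[mr_ctrl->mru].lkey;
	/* Linear scan of the small per-queue array. */
	for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
		if (addr >= mr_ctrl->cache[i].start &&
		    addr < mr_ctrl->cache[i].end) {
			mr_ctrl->mru = i;
			return mr_ctrl->cache[i].lkey;
		}
	}
	/* Miss on the top-half: take the slow bottom-half path. */
	return mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
}
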
/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_priv *priv = rxq_ctrl->priv;

	DRV_LOG(DEBUG,
		"Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
		rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	DRV_LOG(DEBUG,
		"Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
		txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
 * list, register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx5_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
	}
	return lkey;
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}

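/*
 * Illustrative sketch (not part of the driver): how a datapath thread is
 * expected to consume the generation number reset above. The real check
 * lives in the datapath header; pairing this reader with the rte_smp_wmb()
 * in mlx5_mr_mem_event_free_cb() is what makes the flush reliable.
 */
static void __rte_unused
mr_check_generation_sketch(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* dev_gen_ptr points at priv->mr.dev_gen, bumped on memory free. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}
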
/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param dev
 *   Pointer to the ethernet device.
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
static struct mlx5_mr *
mlx5_create_mr_ext(struct rte_eth_dev *dev, uintptr_t addr, size_t len,
		   int socket_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;

	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		DRV_LOG(WARNING,
			"port %u failed to create a verbs MR for address (%p)",
			dev->data->port_id, (void *)addr);
		rte_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"port %u MR CREATED (%p) for external memory %p:\n"
		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		dev->data->port_id, (void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}

/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * The externally allocated chunk is registered and an MR is created for it.
 * The MR object is added to the global list. If the memseg list of an MR
 * object (mr->msl) is null, the MR object can be regarded as externally
 * allocated memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range has different physical memory mapped
 * again, it may cause a crash on the device due to a stale translation entry.
 * The PMD can't track the free event of the external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx5_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mlx5_mr_cache entry;
	uint32_t lkey;

	/* If already registered, it should return. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_lookup_dev(dev, &entry, addr);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	if (lkey != UINT32_MAX)
		return;
	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
		dev->data->port_id, mem_idx, mp->name);
	mr = mlx5_create_mr_ext(dev, addr, len, mp->socket_id);
	if (mr == NULL) {
		DRV_LOG(WARNING,
			"port %u unable to allocate a new MR of"
			" mempool (%s).",
			dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	rte_rwlock_write_lock(&priv->mr.rwlock);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Insert to the local cache table. */
	mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
}

/**
 * Register MRs for all the memory chunks in a mempool which has externally
 * allocated memory and fill in the local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MRs for all the memory chunks in a mempool which has externally
 * allocated memory and search for the LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
		      struct rte_mempool *mp)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx5_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
				  (uintptr_t)memhdr->addr);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}

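/*
 * Illustrative sketch (not part of the driver): mlx5_mr_update_mp() is meant
 * to be called at queue setup/start so that every chunk of the Rx mempool is
 * registered before traffic flows. The helper name and its rxq/mp parameters
 * are hypothetical.
 */
static void __rte_unused
mr_warmup_mempool_sketch(struct rte_eth_dev *dev, struct mlx5_rxq_data *rxq,
			 struct rte_mempool *mp)
{
	if (mlx5_mr_update_mp(dev, &rxq->mr_ctrl, mp) < 0)
		DRV_LOG(WARNING, "port %u failed to pre-register mempool %s",
			dev->data->port_id, mp->name);
}
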
/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
{
#ifndef NDEBUG
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&priv->mr.rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      dev->data->port_id, mr_n++,
		      rte_cpu_to_be_32(mr->ibv_mr->lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx5_mr_cache ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("port %u dumping global cache", dev->data->port_id);
	mlx5_mr_btree_dump(&priv->mr.cache);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
#endif
}

/**
 * Release all the created MRs and resources. Remove the device from the
 * memory callback list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_mr_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);

	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
		mlx5_mr_dump_dev(dev);
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach from MR list and move to free list. */
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
	}
	LIST_INIT(&priv->mr.mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&priv->mr.cache);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(dev);
}