/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Memory management functions for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_rwlock.h>

#include "mlx4_glue.h"
#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx4_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx4_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a rare
	 * case on a very slow path, it is acceptable. Initially, cache_bh[]
	 * is given practically enough space, and once expanded, it should not
	 * need to grow again.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		WARN("failed to expand MR B-tree (%p) table", (void *)bt);
		ret = -1;
	} else {
		DEBUG("expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mlx4_mr_cache *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	assert(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
			       lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	assert(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}

/**
 * Insert an entry in the B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry)
{
	struct mlx4_mr_cache *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	assert(bt != NULL);
	assert(bt->len <= bt->size);
	assert(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DEBUG("abort insertion to B-tree(%p): already exists at"
		      " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DEBUG("inserted B-tree(%p)[%u],"
	      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
	      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}
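
/*
 * Usage sketch for the two helpers above (illustrative only, not part of the
 * driver). mr_btree_insert() re-runs the lookup internally, so a caller only
 * needs to fill in the entry; 'a', 'b' and 'key' below are hypothetical:
 *
 *	struct mlx4_mr_cache e = { .start = a, .end = b, .lkey = key };
 *
 *	if (mr_btree_insert(bt, &e) < 0)
 *		; // table full, bt->overflow is now set
 */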

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mlx4_mr_cache),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		ERROR("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mlx4_mr_cache) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx4_mr_btree_free(struct mlx4_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx4_mr_btree_dump(struct mlx4_mr_btree *bt)
{
	int idx;
	struct mlx4_mr_cache *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mlx4_mr_cache *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
}

/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		struct ibv_mr *ibv_mr = mr->ibv_mr;

		assert(mr->ms_bmp_n == 1);
		assert(mr->ms_n == 1);
		assert(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * verbs MR as there's only one chunk.
		 */
		entry->start = (uintptr_t)ibv_mr->addr;
		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			assert(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
	}
	return idx;
}
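
/*
 * Iteration pattern used throughout this file to walk all chunks of an MR,
 * shown here for reference (the same loop appears in mr_insert_dev_cache()
 * and mlx4_mr_dump_dev() below):
 *
 *	for (n = 0; n < mr->ms_bmp_n; ) {
 *		struct mlx4_mr_cache ret;
 *
 *		memset(&ret, 0, sizeof(ret));
 *		n = mr_find_next_chunk(mr, &ret, n);
 *		if (!ret.end)
 *			break;
 *		// ret now describes one contiguous [start, end) chunk
 *	}
 */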

/**
 * Insert an MR to the global B-tree cache. It may fail due to low-on-memory.
 * Then, this entry will have to be searched by mr_lookup_dev_list() in
 * mlx4_mr_create() on miss.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	unsigned int n;

	DEBUG("port %u inserting MR(%p) to global cache",
	      dev->data->port_id, (void *)mr);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mlx4_mr_cache entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
static struct mlx4_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
		   uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx4_mr_cache ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up address on device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
	      uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx4_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is a very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(priv->mr.cache.overflow)) {
		lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*priv->mr.cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mr_lookup_dev_list(dev, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	assert(lkey == UINT32_MAX || (addr >= entry->start &&
				      addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. MR lock must not be held to avoid a deadlock: rte_free()
 * can raise a memory free event and the callback function will spin on the
 * lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx4_mr *mr)
{
	if (mr == NULL)
		return;
	DEBUG("freeing MR(%p):", (void *)mr);
	if (mr->ibv_mr != NULL)
		claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

/**
 * Release resources of a detached MR having no online entry.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr_next;
	struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could call the memory free callback function, which would be a
	 * deadlock situation.
	 */
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = priv->mr.mr_free_list;
	LIST_INIT(&priv->mr.mr_free_list);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx4_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
	       uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx4_mr *mr = NULL;
	size_t len;
	uint32_t ms_n;
	uint32_t bmp_size;
	void *bmp_mem;
	int ms_idx_shift = -1;
	unsigned int n;
	struct mr_find_contig_memsegs_data data = {
		.addr = addr,
	};
	struct mr_find_contig_memsegs_data data_re;

	DEBUG("port %u creating an MR using address (%p)",
	      dev->data->port_id, (void *)addr);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		WARN("port %u using address (%p) of unregistered mempool"
		     " in secondary process, please create mempool"
		     " before rte_eth_dev_start()",
		     dev->data->port_id, (void *)addr);
		rte_errno = EPERM;
		goto err_nolock;
	}
	/*
	 * Release detached MRs if any. This must not be called while holding
	 * either memory_hotplug_lock or priv->mr.rwlock. MRs on the free list
	 * have been detached by the memory free event but couldn't be released
	 * inside the callback due to deadlock. As a result, releasing resources
	 * is quite opportunistic.
	 */
	mlx4_mr_garbage_collect(dev);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register maximum range.
	 * In the best case where mempools are not dynamically recreated and
	 * '--socket-mem' is specified as an EAL option, it is very likely to
	 * have only one MR(LKey) per socket and per hugepage size even though
	 * the system memory is highly fragmented. As the whole memory chunk
	 * will be pinned by the kernel, it can't be reused unless the entire
	 * chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to look up on the datapath.
	 */
	if (!priv->mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		WARN("port %u unable to find virtually contiguous"
		     " chunk for address (%p)."
		     " rte_memseg_contig_walk() failed.",
		     dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
	assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	assert(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      dev->data->port_id, (void *)addr,
	      data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		WARN("port %u unable to allocate memory for a new MR of"
		     " address (%p).",
		     dev->data->port_id, (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		WARN("port %u unable to initialize bitmap for a new MR of"
		     " address (%p).",
		     dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		WARN("port %u unable to find virtually contiguous"
		     " chunk for address (%p)."
		     " rte_memseg_contig_walk() failed.",
		     dev->data->port_id, (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		mr_free(mr);
		goto alloc_resources;
	}
	assert(data.msl == data_re.msl);
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/*
	 * Check that the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&priv->mr.cache, entry);
		DEBUG("port %u found MR for %p on final lookup, abort",
		      dev->data->port_id, (void *)addr);
		rte_rwlock_write_unlock(&priv->mr.rwlock);
		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx4_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mlx4_mr_cache ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called while holding the memory lock because it doesn't use
	 * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx4_alloc_verbs_buf().
	 */
	mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		WARN("port %u failed to create a verbs MR for address (%p)",
		     dev->data->port_id, (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	assert((uintptr_t)mr->ibv_mr->addr == data.start);
	assert(mr->ibv_mr->length == len);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	DEBUG("port %u MR CREATED (%p) for %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      dev->data->port_id, (void *)mr, (void *)addr,
	      data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	/* Fill in output data. */
	mr_lookup_dev(dev, entry, addr);
	/* Lookup can't fail. */
	assert(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&priv->mr.rwlock);
err_memlock:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
err_nolock:
	/*
	 * In case of error, as this can be called in a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}
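
/*
 * Locking order used by mlx4_mr_create() above, summarized for reference:
 * take mcfg->memory_hotplug_lock (read) first, then priv->mr.rwlock (write).
 * Resource allocation happens before either lock is taken, and the retry with
 * a single page (goto alloc_resources) fully drops both, so neither lock is
 * ever held across rte_malloc()/rte_free().
 */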

/**
 * Rebuild the global B-tree cache of device from the original MR list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr;

	DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
	/* Flush cache to rebuild. */
	priv->mr.cache.len = 1;
	priv->mr.cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr)
		if (mr_insert_dev_cache(dev, mr) < 0)
			return;
}

/**
 * Callback for memory free event. Iterate freed memsegs and check whether each
 * belongs to an existing MR. If found, clear the bit from the bitmap of the
 * MR. As a result, the MR would be fragmented. If it becomes empty, the MR
 * will be freed later by mlx4_mr_garbage_collect().
 *
 * The global cache must be rebuilt if there's any change and this event has to
 * be propagated to dataplane threads to flush the local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	const struct rte_memseg_list *msl;
	struct mlx4_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DEBUG("port %u free callback: addr=%p, len=%zu",
	      dev->data->port_id, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	assert(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mlx4_mr_cache entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mr_lookup_dev_list(dev, &entry, start);
		if (mr == NULL)
			continue;
		assert(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		assert(ms != NULL);
		assert(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		assert(rte_bitmap_get(mr->ms_bmp, pos));
		assert(pos < mr->ms_bmp_n);
		DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
		      dev->data->port_id, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
			DEBUG("port %u remove MR(%p) from list",
			      dev->data->port_id, (void *)mr);
		}
		/*
		 * MR is fragmented or will be freed. The global cache must be
		 * rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mr_rebuild_dev_cache(dev);
		/*
		 * Flush local caches by propagating invalidation across cores.
		 * rte_smp_wmb() is enough to synchronize this event. If one of
		 * the freed memsegs is seen by another core, it means the
		 * memseg has been re-allocated by the allocator, which comes
		 * after this free call. Therefore, this store instruction
		 * (incrementing the generation below) is guaranteed to be seen
		 * by other cores before they see the newly allocated memory.
		 */
		++priv->mr.dev_gen;
		DEBUG("broadcasting local cache flush, gen=%d",
		      priv->mr.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&priv->mr.rwlock);
#ifndef NDEBUG
	if (rebuild)
		mlx4_mr_dump_dev(dev);
#endif
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx4_priv *priv;
	struct mlx4_dev_list *dev_list = &mlx4_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_read_lock(&mlx4_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx4 devices. */
		LIST_FOREACH(priv, dev_list, mem_event_cb)
			mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
		rte_rwlock_read_unlock(&mlx4_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		   struct mlx4_mr_cache *entry, uintptr_t addr)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
	uint16_t idx;
	uint32_t lkey;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*priv->mr.cache.table)[idx];
		rte_rwlock_read_unlock(&priv->mr.rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx4_mr_create(dev, entry, addr);
	/*
	 * Update the local cache if successfully created a new global MR. Even
	 * if failed to create one, there's no action to take in this datapath
	 * code. As the returned LKey is invalid, this will eventually make HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and, on
 * miss, search in the global MR cache table and update the new entry to the
 * per-queue local caches.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		   uintptr_t addr)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; local cache_bh[] will be updated inside if possible.
		 * Top-half cache entry will also be updated.
		 */
		lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N;
	return lkey;
}
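
/*
 * Note on the cache hierarchy implemented above: each queue keeps a small
 * linear array (mr_ctrl->cache[], the "top half") probed inline on the
 * datapath, backed by the per-queue B-tree cache_bh[] (the "bottom half"),
 * which in turn falls back to the device-global B-tree under priv->mr.rwlock
 * and finally to MR creation. Replacement in the top half is a simple FIFO
 * over MLX4_MR_CACHE_N slots, with 'mru' marking the last hit.
 */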

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
{
	struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx4_priv *priv = rxq->priv;

	return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx4_priv *priv = txq->priv;

	return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}

/**
 * Bottom-half of LKey search on Tx. If it can't be found in the memseg list,
 * register the mempool of the mbuf as externally allocated memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx4_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
	}
	return lkey;
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 */
void
mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DEBUG("mr_ctrl(%p): flushed, cur_gen=%d",
	      (void *)mr_ctrl, mr_ctrl->cur_gen);
}
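
/*
 * Sketch of how a datapath caller is expected to pair the generation number
 * with this flush. The actual inline fast path lives in the Rx/Tx headers,
 * so the lines below are illustrative rather than a copy:
 *
 *	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 *		mlx4_mr_flush_local_cache(mr_ctrl);
 *	// then perform the linear/B-tree lookup as usual
 */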

/**
 * Called during rte_mempool_mem_iter() by mlx4_mr_update_ext_mp().
 *
 * Externally allocated chunk is registered and an MR is created for the chunk.
 * The MR object is added to the global list. If the memseg list of an MR
 * object (mr->msl) is null, the MR object can be regarded as externally
 * allocated memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range has different physical memory mapped
 * again, it may cause a crash on the device due to the wrong translation
 * entry. The PMD can't track the free event of the external memory for now.
 */
static void
mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx4_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mlx4_mr_cache entry;
	uint32_t lkey;

	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&priv->mr.rwlock);
	lkey = mr_lookup_dev(dev, &entry, addr);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
	if (lkey != UINT32_MAX)
		return;
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, mp->socket_id);
	if (mr == NULL) {
		WARN("port %u unable to allocate memory for a new MR of"
		     " mempool (%s).",
		     dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	DEBUG("port %u register MR for chunk #%d of mempool (%s)",
	      dev->data->port_id, mem_idx, mp->name);
	mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len,
				       IBV_ACCESS_LOCAL_WRITE);
	if (mr->ibv_mr == NULL) {
		WARN("port %u failed to create a verbs MR for address (%p)",
		     dev->data->port_id, (void *)addr);
		data->ret = -1;
		mr_free(mr);
		return;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	rte_rwlock_write_lock(&priv->mr.rwlock);
	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
	DEBUG("port %u MR CREATED (%p) for external memory %p:\n"
	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      dev->data->port_id, (void *)mr, (void *)addr,
	      addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mr_insert_dev_cache(dev, mr);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Insert to the local cache table. */
	mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr);
}

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and fill in local cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data);
	return data.ret;
}

/**
 * Register MR for entire memory chunks in a Mempool having externally allocated
 * memory and search LKey of the address to return.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
{
	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx4_priv *priv = txq->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		WARN("port %u using address (%p) from unregistered mempool"
		     " having externally allocated memory"
		     " in secondary process, please create mempool"
		     " prior to rte_eth_dev_start()",
		     PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx4_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
static void
mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl,
				  (uintptr_t)memhdr->addr);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}
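
/*
 * Typical use (illustrative): queue setup code pre-populates the per-queue
 * caches for a mempool so the first burst doesn't pay the MR-creation cost,
 * along the lines of:
 *
 *	if (mlx4_mr_update_mp(dev, &rxq->mr_ctrl, mp) < 0)
 *		; // fall back to on-demand registration in the datapath
 */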

#ifndef NDEBUG
/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx4_mr_dump_dev(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&priv->mr.rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
		unsigned int n;

		DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      dev->data->port_id, mr_n++,
		      rte_cpu_to_be_32(mr->ibv_mr->lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mlx4_mr_cache ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("port %u dumping global cache", dev->data->port_id);
	mlx4_mr_btree_dump(&priv->mr.cache);
	rte_rwlock_read_unlock(&priv->mr.rwlock);
}
#endif /* !NDEBUG */

/**
 * Release all the created MRs and resources. Remove the device from the memory
 * callback list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx4_mr_release(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);

	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock);
	LIST_REMOVE(priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock);
#ifndef NDEBUG
	mlx4_mr_dump_dev(dev);
#endif
	rte_rwlock_write_lock(&priv->mr.rwlock);
	/* Detach from MR list and move to free list. */
	while (mr_next != NULL) {
		struct mlx4_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
	}
	LIST_INIT(&priv->mr.mr_list);
	/* Free global cache. */
	mlx4_mr_btree_free(&priv->mr.cache);
	rte_rwlock_write_unlock(&priv->mr.rwlock);
	/* Free all remaining MRs. */
	mlx4_mr_garbage_collect(dev);
}