+ DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
+ /*
+ * Flush the cache to rebuild it. Entry 0 of the B-tree table is a
+ * sentinel, so resetting len to 1 empties the table.
+ */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free event. Iterate the freed memsegs and check whether
+ * each of them belongs to an existing MR. If so, clear the corresponding bit
+ * in the MR's bitmap, leaving the MR fragmented. If an MR becomes empty, it
+ * will be freed later by mlx5_mr_garbage_collect(). Even if this callback is
+ * called from a secondary process, the garbage collector runs in the primary
+ * process, as a secondary process can't call mlx5_mr_create().
+ *
+ * The global cache must be rebuilt if anything changed, and the event has to
+ * be propagated to the dataplane threads so they flush their local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx5_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx5_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ assert(mr->msl); /* Can't be external memory. */
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(pos < mr->ms_bmp_n);
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * The MR is now fragmented or about to be freed; the global cache
+ * must be rebuilt.
+ */
+ rebuild = 1;
+ }
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If another
+ * core sees one of the freed memsegs again, the memseg must have
+ * been reallocated by the allocator, which happens after this
+ * free call. Therefore, this store (incrementing the generation
+ * below) is guaranteed to be visible to the other core before it
+ * sees the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
+ }
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+}
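
The consumer side of this generation protocol lives in the datapath top-half: before trusting its local caches, each queue compares its cached generation against the device generation and flushes on mismatch. A minimal sketch of that check, assuming only the mr_ctrl fields used in this patch and the mlx5_mr_flush_local_cache() defined further down; the helper name is illustrative, not part of this patch:

static __rte_always_inline void
example_mr_check_gen(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* dev_gen_ptr points at priv->mr.dev_gen, bumped by the free cb. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}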
+
+/**
+ * Callback for memory event. This can be called from both the primary and
+ * secondary processes.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ * @param arg
+ * User argument (unused).
+ */
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct priv *priv;
+ struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ /* Iterate all the existing mlx5 devices. */
+ LIST_FOREACH(priv, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
+}
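
For completeness, this callback only fires once it has been registered with the EAL, once per process. A minimal registration sketch using rte_mem_event_callback_register(); the wrapper function and the callback name string are illustrative, not part of this patch:

/* Typically done once, e.g. at PMD probe time. */
static void
example_register_mem_event_cb(void)
{
	int ret;

	ret = rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
					      mlx5_mr_mem_event_cb, NULL);
	if (ret < 0 && rte_errno != EEXIST)
		DRV_LOG(ERR, "cannot register memory event callback");
}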
+
+/**
+ * Look up an address in the global MR cache table. If not found, create a new
+ * MR. Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to the returned MR cache entry, found in the global cache or
+ * newly created. If creation fails, this is not written.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct mlx5_mr_cache *entry, uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update the local cache. Even if the insert fails, return the
+ * found entry so the top-half cache gets updated. Next time, the
+ * entry will be found in the global cache again.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
+ }
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx5_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if a new global MR was successfully created.
+ * Even if creation failed, there's no action to take in this datapath
+ * code: the returned LKey is invalid, which will eventually make the
+ * HW fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
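
The len = 1 resets earlier (and in mlx5_mr_flush_local_cache() below) make sense given that entry 0 of each B-tree table is a sentinel. A minimal sketch of the kind of lookup mr_btree_lookup() performs, assuming the start/end/lkey entry layout used throughout this patch; this is illustrative, not the driver's actual implementation:

/* Binary-search a table sorted by start address; entry 0 is a sentinel. */
static uint32_t
example_btree_lookup(struct mlx5_mr_cache (*table)[], uint16_t len,
		     uint16_t *idx, uintptr_t addr)
{
	uint16_t base = 0;
	uint16_t n = len;

	/* Find the last entry whose start is <= addr. */
	while (n > 1) {
		uint16_t half = n / 2;

		if ((*table)[base + half].start <= addr) {
			base += half;
			n -= half;
		} else {
			n = half;
		}
	}
	*idx = base;
	/* The sentinel never covers a real range, so base == 0 is a miss. */
	if (base != 0 && addr < (*table)[base].end)
		return (*table)[base].lkey;
	return UINT32_MAX;
}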
+
+/**
+ * Bottom-half of LKey search on the datapath. First search in cache_bh[]; on
+ * a miss, search the global MR cache table and add the new entry to the
+ * per-queue local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
+{
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * On a miss in the local lookup table, search the global cache;
+ * the local cache_bh[] is updated inside if possible, and the
+ * top-half cache entry (repl) is written as well.
+ */
+ lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
+}
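
For context, the top-half that precedes this bottom-half is a linear scan over the small per-queue cache[] array, trying the most-recently-used slot first; zeroed entries (start == end == 0) can never match. A minimal sketch under those assumptions; the function name is illustrative, not part of this patch:

static __rte_always_inline uint32_t
example_mr_lookup_cache(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	struct mlx5_mr_cache *cache = mr_ctrl->cache;
	uint16_t i;

	/* The MRU entry is the most likely hit. */
	if (cache[mr_ctrl->mru].start <= addr &&
	    addr < cache[mr_ctrl->mru].end)
		return cache[mr_ctrl->mru].lkey;
	/* Otherwise scan the whole linear array. */
	for (i = 0; i < MLX5_MR_CACHE_N; ++i) {
		if (cache[i].start <= addr && addr < cache[i].end) {
			mr_ctrl->mru = i;
			return cache[i].lkey;
		}
	}
	/* Miss: fall back to the bottom-half above. */
	return UINT32_MAX;
}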
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx. If the address can't be found in the
+ * memseg lists, register the mempool of the mbuf as externally allocated
+ * memory.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mb
+ * Pointer to mbuf.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey;
+
+ lkey = mlx5_tx_addr2mr_bh(txq, addr);
+ if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+ }
+ return lkey;
+}
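
mlx5_tx_update_ext_mp(), called above on ENXIO, ultimately walks every memory chunk of the mempool so that each chunk reaches mlx5_mr_update_ext_mp_cb() below. A minimal sketch of that walk, assuming the mr_update_mp_data layout the callback dereferences (dev, mr_ctrl, ret); the function name is illustrative, not the driver's actual implementation:

static void
example_mr_update_ext_mp(struct rte_eth_dev *dev,
			 struct mlx5_mr_ctrl *mr_ctrl,
			 struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	/* Invokes the callback once per memory chunk of the mempool. */
	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
}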
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
+
+/**
+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
+ *
+ * The externally allocated chunk is registered and an MR is created for it.
+ * The MR object is added to the global list. If the memseg list of an MR
+ * object (mr->msl) is NULL, the MR object can be regarded as covering
+ * externally allocated memory.
+ *
+ * Once external memory is registered, it should be static. If the memory is
+ * freed and the virtual address range gets different physical memory mapped
+ * again, it may crash the device due to a stale translation entry. The PMD
+ * can't track free events for external memory for now.
+ */
+static void
+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
+ struct mlx5_mr *mr = NULL;
+ uintptr_t addr = (uintptr_t)memhdr->addr;
+ size_t len = memhdr->len;
+ struct mlx5_mr_cache entry;
+ uint32_t lkey;
+
+ /* If already registered, return immediately. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_lookup_dev(dev, &entry, addr);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ if (lkey != UINT32_MAX)
+ return;
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u unable to allocate memory for a new MR of"
+ " mempool (%s).",
+ dev->data->port_id, mp->name);
+ data->ret = -1;