+/**
+ * Populate the cache with the LKeys of all MRs used by the mempool.
+ * It is intended for registering Rx mempools in advance, so that
+ * LKey lookups for their buffers hit the per-queue cache on the data path.
+ *
+ * @param mr_ctrl
+ *   Per-queue MR control handle.
+ * @param mp
+ *   Registered memory pool.
+ *
+ * @return
+ *   0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
+			       struct rte_mempool *mp)
+{
+	struct mlx5_mr_share_cache *share_cache =
+		container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+			     dev_gen);
+	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+	struct mlx5_mempool_reg *mpr;
+	unsigned int i;
+
+	/*
+	 * The registration is still valid after the lock is released,
+	 * because this function is only called after the mempool
+	 * has been registered.
+	 */
+	rte_rwlock_read_lock(&share_cache->rwlock);
+	mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+	rte_rwlock_read_unlock(&share_cache->rwlock);
+	if (mpr == NULL) {
+		DRV_LOG(ERR, "Mempool %s is not registered", mp->name);
+		rte_errno = ENOENT;
+		return -1;
+	}
+	for (i = 0; i < mpr->mrs_n; i++) {
+		struct mlx5_mempool_mr *mr = &mpr->mrs[i];
+		struct mr_cache_entry entry;
+		uint32_t lkey;
+		uint16_t idx;
+
+		/* Skip MRs whose LKey is already in the per-queue cache. */
+		lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
+		if (lkey != UINT32_MAX)
+			continue;
+		/*
+		 * Double the B-tree capacity when it is full.
+		 * If the expansion fails, the insertion below
+		 * will fail as well and report the error.
+		 */
+		if (bt->len == bt->size)
+			mr_btree_expand(bt, bt->size << 1);
+		entry.start = (uintptr_t)mr->pmd_mr.addr;
+		entry.end = entry.start + mr->pmd_mr.len;
+		/* LKeys are cached in big-endian, ready for use in WQEs. */
+		entry.lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
+		if (mr_btree_insert(bt, &entry) < 0) {
+			DRV_LOG(ERR, "Cannot insert cache entry for mempool %s MR %08x",
+				mp->name, entry.lkey);
+			rte_errno = EINVAL;
+			return -1;
+		}
+	}
+	return 0;
+}
+
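For context, below is a minimal sketch of how a caller might combine this function with mempool registration during Rx queue setup. The helper rxq_prepare_mempool() is hypothetical, and mlx5_mr_mempool_register() with the (cdev, mp, is_extmem) signature is an assumption whose exact form varies across DPDK versions; the sketch only illustrates the intended call order: register the mempool first, then pre-populate the per-queue cache.

static int
rxq_prepare_mempool(struct mlx5_common_device *cdev,
		    struct mlx5_mr_ctrl *mr_ctrl, struct rte_mempool *mp)
{
	/*
	 * Hypothetical helper, not part of the patch. The registration
	 * call and its signature are assumptions for illustration.
	 * An already-registered mempool (EEXIST) is treated as success.
	 */
	if (mlx5_mr_mempool_register(cdev, mp, false) < 0 &&
	    rte_errno != EEXIST)
		return -1; /* rte_errno is set by the registration. */
	/* Pre-populate the per-queue cache with the mempool's LKeys. */
	return mlx5_mr_mempool_populate_cache(mr_ctrl, mp);
}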