*
* @param mr_ctrl
* Pointer to MR control structure.
- * @param cdev
- * Pointer to the mlx5 device structure.
+ * @param dev_gen_ptr
+ * Pointer to generation number of global cache.
* @param socket
* NUMA socket on which memory must be allocated.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, struct mlx5_common_device *cdev,
+mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
int socket)
{
if (mr_ctrl == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
- mr_ctrl->cdev = cdev;
/* Save pointer of global generation number to check memory event. */
- mr_ctrl->dev_gen_ptr = &cdev->mr_scache.dev_gen;
+ mr_ctrl->dev_gen_ptr = dev_gen_ptr;
/* Initialize B-tree and allocate memory for bottom-half cache table. */
return mlx5_mr_btree_init(&mr_ctrl->cache_bh, MLX5_MR_BTREE_CACHE_N,
socket);
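
With the new signature a caller no longer hands over the whole device object; it passes the address of the generation counter inside the shared cache, i.e. the same field the old code reached through cdev. A minimal sketch of a migrated call site, assuming the caller still holds a struct mlx5_common_device *cdev; the wrapper name is hypothetical:

/* Hypothetical queue-setup helper; only the init call changes. */
static int
queue_mr_ctrl_setup(struct mlx5_mr_ctrl *mr_ctrl,
		    struct mlx5_common_device *cdev, int socket)
{
	/* Before: mlx5_mr_ctrl_init(mr_ctrl, cdev, socket); */
	return mlx5_mr_ctrl_init(mr_ctrl, &cdev->mr_scache.dev_gen, socket);
}
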
mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
struct mr_cache_entry *entry, uintptr_t addr)
{
- struct mlx5_mr_share_cache *share_cache = &mr_ctrl->cdev->mr_scache;
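+ /* Derive the shared MR cache (and its owning device) back from the
+ * generation-number pointer saved by mlx5_mr_ctrl_init(). */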
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_common_device *cdev =
+ container_of(share_cache, struct mlx5_common_device, mr_scache);
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
uint32_t lkey;
uint16_t idx;
}
rte_rwlock_read_unlock(&share_cache->rwlock);
/* First time to see the address? Create a new MR. */
- lkey = mlx5_mr_create(mr_ctrl->cdev, share_cache, entry, addr);
+ lkey = mlx5_mr_create(cdev, share_cache, entry, addr);
/*
* Update the local cache if successfully created a new global MR. Even
* if failed to create one, there's no action to take in this datapath
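
The container_of() derivation above relies only on dev_gen_ptr always pointing at the dev_gen member embedded in the shared cache, which is in turn embedded in the common device. A self-contained sketch of that pointer arithmetic, using simplified stand-in structures rather than the real mlx5 definitions (DPDK already provides container_of() in rte_common.h; it is redefined here only to keep the sketch standalone):

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct share_cache { uint32_t dev_gen; };                /* stand-in */
struct common_device { struct share_cache mr_scache; };  /* stand-in */

int
main(void)
{
	struct common_device dev = { .mr_scache = { .dev_gen = 7 } };
	uint32_t *dev_gen_ptr = &dev.mr_scache.dev_gen; /* what mr_ctrl stores */
	struct share_cache *sc =
		container_of(dev_gen_ptr, struct share_cache, dev_gen);
	struct common_device *cd =
		container_of(sc, struct common_device, mr_scache);

	printf("%" PRIu32 " %d\n", sc->dev_gen, cd == &dev); /* prints "7 1" */
	return 0;
}
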
/**
* Update bottom-half cache from the list of mempool registrations.
*
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Per-queue MR control handle.
* @param entry
* MR lkey on success, UINT32_MAX on failure.
*/
static uint32_t
-mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
+mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
struct mr_cache_entry *entry,
struct rte_mempool *mp, uintptr_t addr)
{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
struct mlx5_mempool_reg *mpr;
uint32_t lkey = UINT32_MAX;
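
The rest of this function (outside the hunk) walks the shared cache's mempool registrations under the read lock and, on a hit, fills *entry and inserts it into the per-queue bottom-half B-tree. A rough outline of that flow; the list, helper, and insert names below are illustrative, not the driver's actual identifiers:

/* Hypothetical outline of the registration lookup. */
static uint32_t
lookup_mempool_regs_outline(struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_btree *bt,
			    struct mr_cache_entry *entry,
			    struct rte_mempool *mp, uintptr_t addr)
{
	struct mlx5_mempool_reg *mpr;
	uint32_t lkey = UINT32_MAX;

	/* The read lock keeps registrations stable while they are searched. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	LIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {
		if (mpr->mp != mp)
			continue;
		/* Hypothetical helper: find the MR covering addr, fill *entry. */
		lkey = mempool_reg_addr2mr(mpr, addr, entry);
		break;
	}
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry); /* cache it in the bottom half */
	rte_rwlock_read_unlock(&share_cache->rwlock);
	return lkey;
}
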
/**
* Bottom-half lookup for the address from the mempool.
*
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Per-queue MR control handle.
* @param mp
* MR lkey on success, UINT32_MAX on failure.
*/
uint32_t
-mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
+mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mempool *mp, uintptr_t addr)
{
struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];
if (likely(lkey != UINT32_MAX)) {
*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
} else {
- lkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,
- mp, addr);
+ lkey = mlx5_lookup_mempool_regs(mr_ctrl, repl, mp, addr);
/* Can only fail if the address is not from the mempool. */
if (unlikely(lkey == UINT32_MAX))
return UINT32_MAX;
{
uint32_t lkey;
uintptr_t addr = (uintptr_t)mb->buf_addr;
- struct mlx5_common_device *cdev = mr_ctrl->cdev;
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_common_device *cdev =
+ container_of(share_cache, struct mlx5_common_device, mr_scache);
if (cdev->config.mr_mempool_reg_en) {
struct rte_mempool *mp = NULL;
mp = buf->mp;
}
if (mp != NULL) {
- lkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,
- mr_ctrl, mp, addr);
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
/*
* Lookup can only fail on invalid input, e.g. "addr"
* is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
- struct mlx5_common_device *cdev; /* Pointer to the mlx5 common device.*/
uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
uint32_t cur_gen; /* Generation number saved to flush caches. */
uint16_t mru; /* Index of last hit entry in top-half cache. */
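
These two generation fields are what keep the per-queue caches coherent without datapath locks: a memory event bumps the shared counter behind dev_gen_ptr, and the next bottom-half lookup sees that it no longer matches the cur_gen snapshot and drops the stale local entries. A hedged sketch of that check; mlx5_mr_flush_local_cache() is the existing flush helper, the wrapper around it is illustrative:

/* Illustrative prologue for a bottom-half lookup. */
static void
mr_ctrl_check_gen(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* The shared counter moved: local top-half and bottom-half entries
	 * may be stale, so flush them and resynchronize the snapshot. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}
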
/* mlx5_common_mr.c */
__rte_internal
-int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl,
- struct mlx5_common_device *cdev, int socket);
+int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
+ int socket);
__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
-uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
+uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mempool *mp, uintptr_t addr);
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
if (likely(lkey != UINT32_MAX))
return lkey;
mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
- return mlx5_mr_mempool2mr_bh(&mr_ctrl->cdev->mr_scache, mr_ctrl,
- mp, addr);
+ return mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
}
/**
{
struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
uintptr_t addr = (uintptr_t)mb->buf_addr;
- struct mlx5_rxq_ctrl *rxq_ctrl;
uint32_t lkey;
/* Linear search on MR cache array. */
MLX5_MR_CACHE_N, addr);
if (likely(lkey != UINT32_MAX))
return lkey;
- /*
- * Slower search in the mempool database on miss.
- * During queue creation rxq->sh is not yet set, so we use rxq_ctrl.
- */
- rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
- mr_ctrl, mb->pool, addr);
+ /* Slower search in the mempool database on miss. */
+ return mlx5_mr_mempool2mr_bh(mr_ctrl, mb->pool, addr);
}
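
For completeness, the "linear search on MR cache array" above is the lock-free top half: a small fixed-size per-queue array scanned inline, starting from the most-recently-used slot (mru), before falling back to the bottom-half call at the end of this function. A simplified, self-contained sketch of that idea, not the driver's mlx5_mr_lookup_lkey() itself:

#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-in for a top-half cache entry. */
struct cache_entry {
	uintptr_t start;
	uintptr_t end;
	uint32_t lkey;
};

/* Check the MRU slot first, then scan the small array; remember any hit. */
static uint32_t
lookup_lkey(struct cache_entry *tbl, uint16_t *mru, uint16_t n, uintptr_t addr)
{
	uint16_t i;

	if (addr >= tbl[*mru].start && addr < tbl[*mru].end)
		return tbl[*mru].lkey;
	for (i = 0; i < n; i++) {
		if (tbl[i].start != 0 &&
		    addr >= tbl[i].start && addr < tbl[i].end) {
			*mru = i;
			return tbl[i].lkey;
		}
	}
	return UINT32_MAX; /* miss: fall back to the bottom half */
}

int
main(void)
{
	struct cache_entry tbl[4] = {
		{ 0x1000, 0x2000, 11 },
		{ 0x3000, 0x4000, 22 },
	};
	uint16_t mru = 0;

	printf("%" PRIu32 "\n", lookup_lkey(tbl, &mru, 4, 0x3800)); /* 22 */
	printf("%" PRIu32 "\n", lookup_lkey(tbl, &mru, 4, 0x5000)); /* UINT32_MAX */
	return 0;
}
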
/**