mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
struct rte_mempool *mp)
{
- struct mlx5_mp_id mp_id;
-
- mlx5_mp_id_init(&mp_id, 0);
- return mlx5_mr_mempool_register(&cdev->mr_scache, cdev->pd, mp, &mp_id);
+ return mlx5_mr_mempool_register(cdev, mp);
}
/**
mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
struct rte_mempool *mp)
{
- struct mlx5_mp_id mp_id;
-
- mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_unregister(&cdev->mr_scache, mp, &mp_id) < 0)
+ if (mlx5_mr_mempool_unregister(cdev, mp) < 0)
DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
mp->name, cdev->pd, rte_strerror(rte_errno));
}
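With the PD, the shared MR cache, and the IPC context all reachable from the common device, call sites collapse to a (cdev, mp) pair. For illustration, a mempool event callback needs nothing but the common device as its user data; mlx5 wires up a callback much like this sketch (the function name here is hypothetical):

        /* Sketch: dispatch rte_mempool events to the wrappers above. */
        static void
        example_mempool_event_cb(enum rte_mempool_event event,
                                 struct rte_mempool *mp, void *user_data)
        {
                struct mlx5_common_device *cdev = user_data;

                switch (event) {
                case RTE_MEMPOOL_EVENT_READY:
                        mlx5_dev_mempool_register(cdev, mp);
                        break;
                case RTE_MEMPOOL_EVENT_DESTROY:
                        mlx5_dev_mempool_unregister(cdev, mp);
                        break;
                default:
                        break;
                }
        }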
/**
* Request Memory Region creation from the primary process.
*
- * @param[in] mp_id
- * ID of the MP process.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param addr
* Target virtual address to register.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr)
+mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, uintptr_t addr)
{
struct rte_mp_msg mp_req;
struct rte_mp_msg *mp_res;
struct rte_mp_reply mp_rep;
struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
+ struct mlx5_mp_arg_mr_manage *arg = &req->args.mr_manage;
struct mlx5_mp_param *res;
struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
int ret;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
- mp_init_msg(mp_id, &mp_req, MLX5_MP_REQ_CREATE_MR);
- req->args.addr = addr;
+ mp_init_port_agnostic_msg(&mp_req, MLX5_MP_REQ_CREATE_MR);
+ arg->addr = addr;
+ arg->cdev = cdev;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
- DRV_LOG(ERR, "port %u request to primary process failed",
- mp_id->port_id);
+ DRV_LOG(ERR, "Create MR request to primary process failed.");
return -rte_errno;
}
MLX5_ASSERT(mp_rep.nb_received == 1);
mp_res = &mp_rep.msgs[0];
res = (struct mlx5_mp_param *)mp_res->param;
ret = res->result;
if (ret)
rte_errno = -ret;
free(mp_rep.msgs);
return ret;
}
/**
* Request mempool registration or unregistration from the primary process.
*
- * @param mp_id
- * ID of the MP process.
- * @param share_cache
- * Shared MR cache.
- * @param pd
- * Protection domain.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param mempool
* Mempool to register or unregister.
* @param reg
* True to register the mempool, False to unregister.
*/
int
-mlx5_mp_req_mempool_reg(struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache, void *pd,
+mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
struct rte_mempool *mempool, bool reg)
{
struct rte_mp_msg mp_req;
struct rte_mp_msg *mp_res;
struct rte_mp_reply mp_rep;
struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
- struct mlx5_mp_arg_mempool_reg *arg = &req->args.mempool_reg;
+ struct mlx5_mp_arg_mr_manage *arg = &req->args.mr_manage;
struct mlx5_mp_param *res;
struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
enum mlx5_mp_req_type type;
int ret;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
type = reg ? MLX5_MP_REQ_MEMPOOL_REGISTER :
MLX5_MP_REQ_MEMPOOL_UNREGISTER;
- mp_init_msg(mp_id, &mp_req, type);
- arg->share_cache = share_cache;
- arg->pd = pd;
+ mp_init_port_agnostic_msg(&mp_req, type);
arg->mempool = mempool;
+ arg->cdev = cdev;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
- DRV_LOG(ERR, "port %u request to primary process failed",
- mp_id->port_id);
+ DRV_LOG(ERR,
+ "Mempool %sregister request to primary process failed.",
+ reg ? "" : "un");
return -rte_errno;
}
MLX5_ASSERT(mp_rep.nb_received == 1);
uint16_t queue_id; /* DPDK queue ID. */
};
-struct mlx5_mp_arg_mempool_reg {
- struct mlx5_mr_share_cache *share_cache;
- void *pd; /* NULL for MLX5_MP_REQ_MEMPOOL_UNREGISTER */
- struct rte_mempool *mempool;
+struct mlx5_mp_arg_mr_manage {
+ struct mlx5_common_device *cdev;
+ union {
+ struct rte_mempool *mempool;
+ /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */
+ uintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */
+ };
};
-/* Pameters for IPC. */
+/* Parameters for IPC. */
struct mlx5_mp_param {
enum mlx5_mp_req_type type;
int port_id;
int result;
RTE_STD_C11
union {
- uintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */
- struct mlx5_mp_arg_mempool_reg mempool_reg;
- /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */
+ struct mlx5_mp_arg_mr_manage mr_manage;
+ /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER, MLX5_MP_REQ_CREATE_MR */
struct mlx5_mp_arg_queue_state_modify state_modify;
/* MLX5_MP_REQ_QUEUE_STATE_MODIFY */
struct mlx5_mp_arg_queue_id queue_id;
param->port_id = mp_id->port_id;
}
+/**
+ * Initialize IPC port-agnostic message.
+ *
+ * @param[out] msg
+ * Pointer to message to fill in.
+ * @param[in] type
+ * Message type.
+ */
+static inline void
+mp_init_port_agnostic_msg(struct rte_mp_msg *msg, enum mlx5_mp_req_type type)
+{
+ struct mlx5_mp_param *param = (struct mlx5_mp_param *)msg->param;
+
+ memset(msg, 0, sizeof(*msg));
+ strlcpy(msg->name, MLX5_MP_NAME, sizeof(msg->name));
+ msg->len_param = sizeof(*param);
+ param->type = type;
+}
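This initializer pairs with struct mlx5_mp_arg_mr_manage above: the receiver picks the union member from param->type, so a request is fully described by the type, the device, and one payload field. A minimal sketch of the packing pattern that both request functions in this patch follow (the helper name is hypothetical):

        /* Sketch: pack a port-agnostic MR-management request. */
        static void
        example_pack_mr_request(struct rte_mp_msg *msg,
                                enum mlx5_mp_req_type type,
                                struct mlx5_common_device *cdev,
                                struct rte_mempool *mp, uintptr_t addr)
        {
                struct mlx5_mp_param *req = (struct mlx5_mp_param *)msg->param;
                struct mlx5_mp_arg_mr_manage *arg = &req->args.mr_manage;

                mp_init_port_agnostic_msg(msg, type); /* zeroes msg first */
                arg->cdev = cdev;             /* meaningful for every type */
                if (type == MLX5_MP_REQ_CREATE_MR)
                        arg->addr = addr;     /* union: CREATE_MR */
                else
                        arg->mempool = mp;    /* union: MEMPOOL_(UN)REGISTER */
        }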
+
__rte_internal
int mlx5_mp_init_primary(const char *name, const rte_mp_t primary_action);
__rte_internal
__rte_internal
void mlx5_mp_uninit_secondary(const char *name);
__rte_internal
-int mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr);
+int mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, uintptr_t addr);
__rte_internal
-int mlx5_mp_req_mempool_reg(struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache, void *pd,
- struct rte_mempool *mempool, bool reg);
+int mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
+ struct rte_mempool *mempool, bool reg);
__rte_internal
int mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,
struct mlx5_mp_arg_queue_state_modify *sm);
* list is in shared memory, the following LKey lookup should succeed unless the
* request fails.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param share_cache
* Pointer to a global shared MR cache.
* @param[out] entry
* created. If failed to create one, this will not be updated.
* @param addr
* Target virtual address to register.
- * @param mr_ext_memseg_en
- * Configurable flag about external memory segment enable or not.
*
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
static uint32_t
-mlx5_mr_create_secondary(void *pd __rte_unused,
- struct mlx5_mp_id *mp_id,
+mlx5_mr_create_secondary(struct mlx5_common_device *cdev,
struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en __rte_unused)
+ struct mr_cache_entry *entry, uintptr_t addr)
{
int ret;
- if (mp_id == NULL) {
- rte_errno = EINVAL;
- return UINT32_MAX;
- }
- DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
- mp_id->port_id, (void *)addr);
- ret = mlx5_mp_req_mr_create(mp_id, addr);
+ DRV_LOG(DEBUG, "Requesting MR creation for address (%p)", (void *)addr);
+ ret = mlx5_mp_req_mr_create(cdev, addr);
if (ret) {
DRV_LOG(DEBUG, "Fail to request MR creation for address (%p)",
- (void *)addr);
+ (void *)addr);
return UINT32_MAX;
}
rte_rwlock_read_lock(&share_cache->rwlock);
/* Fill in output data. */
mlx5_mr_lookup_cache(share_cache, entry, addr);
/* Lookup can't fail. */
MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_read_unlock(&share_cache->rwlock);
DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
- (void *)addr, entry->start, entry->end, entry->lkey);
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
+ (void *)addr, entry->start, entry->end, entry->lkey);
return entry->lkey;
}
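For orientation, the complete secondary-process miss path after this change, using only functions that appear elsewhere in this diff:

        /*
         * Secondary-process LKey miss, end to end:
         *
         *   mlx5_mr_mb2mr()               datapath inline, linear cache
         *    -> mlx5_mr_mb2mr_bh()        bottom half
         *     -> mlx5_mr_addr2mr_bh()
         *      -> mr_lookup_caches()      global cache miss, then:
         *       -> mlx5_mr_create()       dispatch on rte_eal_process_type()
         *        -> mlx5_mr_create_secondary()
         *         -> mlx5_mp_req_mr_create()  IPC to the primary, which
         *            runs mlx5_mr_create() itself and inserts the MR into
         *            the shared cache; the secondary then fills *entry
         *            from that cache and returns the LKey.
         */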
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-uint32_t
+static uint32_t
mlx5_mr_create_primary(void *pd,
struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr,
* Create a new global Memory Region (MR) for a missing virtual address.
* This can be called from primary and secondary process.
*
- * @param pd
- * Pointer to pd handle of a device (net, regex, vdpa,...).
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param share_cache
* Pointer to a global shared MR cache.
* @param[out] entry
* created. If failed to create one, this will not be updated.
* @param addr
* Target virtual address to register.
- * @param mr_ext_memseg_en
- * Configurable flag about external memory segment enable or not.
*
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-static uint32_t
-mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
+uint32_t
+mlx5_mr_create(struct mlx5_common_device *cdev,
struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+ struct mr_cache_entry *entry, uintptr_t addr)
{
uint32_t ret = 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- ret = mlx5_mr_create_primary(pd, share_cache, entry,
- addr, mr_ext_memseg_en);
+ ret = mlx5_mr_create_primary(cdev->pd, share_cache, entry, addr,
+ cdev->config.mr_ext_memseg_en);
break;
case RTE_PROC_SECONDARY:
- ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
- addr, mr_ext_memseg_en);
+ ret = mlx5_mr_create_secondary(cdev, share_cache, entry, addr);
break;
default:
break;
* Look up address in the global MR cache table. If not found, create a new MR.
* Insert the found/created entry to local bottom-half cache table.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Pointer to per-queue MR control structure.
* @param[out] entry
* created. If failed to create one, this is not written.
* @param addr
* Search key.
- * @param mr_ext_memseg_en
- * Configurable flag about external memory segment enable or not.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
static uint32_t
-mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+mr_lookup_caches(struct mlx5_mr_ctrl *mr_ctrl,
+ struct mr_cache_entry *entry, uintptr_t addr)
{
+ struct mlx5_mr_share_cache *share_cache = &mr_ctrl->cdev->mr_scache;
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
uint32_t lkey;
uint16_t idx;
}
rte_rwlock_read_unlock(&share_cache->rwlock);
/* First time to see the address? Create a new MR. */
- lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
- mr_ext_memseg_en);
+ lkey = mlx5_mr_create(mr_ctrl->cdev, share_cache, entry, addr);
/*
* Update the local cache if successfully created a new global MR. Even
* if failed to create one, there's no action to take in this datapath
* misses, search in the global MR cache table and update the new entry to
* per-queue local caches.
*
- * @param pd
- * Pointer to pd of a device (net, regex, vdpa,...).
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
- * @param share_cache
- * Pointer to a global shared MR cache.
* @param mr_ctrl
* Pointer to per-queue MR control structure.
* @param addr
* Search key.
- * @param mr_ext_memseg_en
- * Configurable flag about external memory segment enable or not.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
static uint32_t
-mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
- unsigned int mr_ext_memseg_en)
+mlx5_mr_addr2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
uint32_t lkey;
uint16_t bh_idx = 0;
* and local cache_bh[] will be updated inside if possible.
* Top-half cache entry will also be updated.
*/
- lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
- repl, addr, mr_ext_memseg_en);
+ lkey = mr_lookup_caches(mr_ctrl, repl, addr);
if (unlikely(lkey == UINT32_MAX))
return UINT32_MAX;
}
}
static int
-mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,
- void *pd, struct rte_mempool *mp,
- struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
{
- if (mp_id == NULL) {
- rte_errno = EINVAL;
- return -1;
- }
- return mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);
+ return mlx5_mp_req_mempool_reg(cdev, mp, true);
}
/**
* Register the memory of a mempool in the protection domain.
*
- * @param share_cache
- * Shared MR cache of the protection domain.
- * @param pd
- * Protection domain object.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param mp
* Mempool to register.
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
*
* @return
* 0 on success, (-1) on failure and rte_errno is set.
*/
int
-mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
- struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
{
if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- return mlx5_mr_mempool_register_primary(share_cache, pd, mp);
+ return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
+ cdev->pd, mp);
case RTE_PROC_SECONDARY:
- return mlx5_mr_mempool_register_secondary(share_cache, pd, mp,
- mp_id);
+ return mlx5_mr_mempool_register_secondary(cdev, mp);
default:
return -1;
}
}
static int
-mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,
- struct rte_mempool *mp,
- struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
{
- if (mp_id == NULL) {
- rte_errno = EINVAL;
- return -1;
- }
- return mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);
+ return mlx5_mp_req_mempool_reg(cdev, mp, false);
}
/**
* Unregister the memory of a mempool from the protection domain.
*
- * @param share_cache
- * Shared MR cache of the protection domain.
+ * @param cdev
+ * Pointer to the mlx5 common device.
* @param mp
* Mempool to unregister.
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
*
* @return
* 0 on success, (-1) on failure and rte_errno is set.
*/
int
-mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
- struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
+mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp)
{
if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
- return mlx5_mr_mempool_unregister_primary(share_cache, mp);
+ return mlx5_mr_mempool_unregister_primary(&cdev->mr_scache, mp);
case RTE_PROC_SECONDARY:
- return mlx5_mr_mempool_unregister_secondary(share_cache, mp,
- mp_id);
+ return mlx5_mr_mempool_unregister_secondary(cdev, mp);
default:
return -1;
}
}
uint32_t
-mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb,
- struct mlx5_mp_id *mp_id)
+mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
{
uint32_t lkey;
uintptr_t addr = (uintptr_t)mb->buf_addr;
}
/* Fallback for generic mechanism in corner cases. */
}
- return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
- addr, cdev->config.mr_ext_memseg_en);
+ return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
}
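The bottom half can drop its pd/share_cache/mp_id parameters only because each per-queue control structure is bound to its device up front; mr_lookup_caches() above dereferences mr_ctrl->cdev. A minimal sketch of that binding at queue creation, assuming the mlx5_mr_ctrl_init() signature used by this series:

        /* Sketch: bind a per-queue MR control structure to the device. */
        static int
        example_queue_mr_setup(struct mlx5_mr_ctrl *mr_ctrl,
                               struct mlx5_common_device *cdev, int socket)
        {
                /* Stores cdev in mr_ctrl for later bottom-half lookups. */
                return mlx5_mr_ctrl_init(mr_ctrl, cdev, socket);
        }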
* Pointer to per-queue MR control structure.
* @param mb
* Pointer to mbuf.
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
__rte_internal
-uint32_t mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,
- struct mlx5_mp_id *mp_id);
+uint32_t mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf);
/**
* Query LKey from a packet buffer.
* Pointer to per-queue MR control structure.
* @param mbuf
* Pointer to mbuf.
- * @param mp_id
- * Multi-process identifier, may be NULL for the primary process.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
static __rte_always_inline uint32_t
-mlx5_mr_mb2mr(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,
- struct mlx5_mp_id *mp_id)
+mlx5_mr_mb2mr(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
{
uint32_t lkey;
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_mb2mr_bh(mr_ctrl, mbuf, mp_id);
+ return mlx5_mr_mb2mr_bh(mr_ctrl, mbuf);
}
/* mlx5_common_mr.c */
void mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
__rte_internal
uint32_t
-mlx5_mr_create_primary(void *pd,
- struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr,
- unsigned int mr_ext_memseg_en);
+mlx5_mr_create(struct mlx5_common_device *cdev,
+ struct mlx5_mr_share_cache *share_cache,
+ struct mr_cache_entry *entry, uintptr_t addr);
/* mlx5_common_verbs.c */
__rte_internal
int
-mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
- struct rte_mempool *mp, struct mlx5_mp_id *mp_id);
+mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp);
__rte_internal
int
-mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
- struct rte_mempool *mp, struct mlx5_mp_id *mp_id);
+mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
+ struct rte_mempool *mp);
#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */
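Taken together, the header now keys all MR registration on the common device alone. A class driver (net, regex, crypto, compress) can tolerate a mempool that is already registered the same way the Rx-queue code below does, as in this sketch:

        /* Sketch: register a queue's mempool; EEXIST means already done. */
        static int
        example_register_queue_pool(struct mlx5_common_device *cdev,
                                    struct rte_mempool *mp)
        {
                if (mlx5_mr_mempool_register(cdev, mp) < 0 &&
                    rte_errno != EEXIST)
                        return -rte_errno;
                return 0;
        }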
mlx5_mprq_buf_free_cb;
mlx5_mr_btree_free;
- mlx5_mr_create_primary;
+ mlx5_mr_create;
mlx5_mr_ctrl_init;
mlx5_mr_flush_local_cache;
mlx5_mr_mb2mr_bh;
uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf, 0);
+ dseg->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
dseg->pbuf = rte_cpu_to_be_64(addr);
return dseg->lkey;
}
*remain -= data_len;
klm->bcount = rte_cpu_to_be_32(data_len);
klm->pbuf = rte_cpu_to_be_64(addr);
- klm->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf, 0);
+ klm->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
return klm->lkey;
}
struct mlx5_mp_param *res = (struct mlx5_mp_param *)mp_res.param;
const struct mlx5_mp_param *param =
(const struct mlx5_mp_param *)mp_msg->param;
- const struct mlx5_mp_arg_mempool_reg *mpr;
- struct mlx5_mp_id mp_id;
+ const struct mlx5_mp_arg_mr_manage *mng = &param->args.mr_manage;
+ struct mr_cache_entry entry;
+ uint32_t lkey;
switch (param->type) {
+ case MLX5_MP_REQ_CREATE_MR:
+ mp_init_port_agnostic_msg(&mp_res, param->type);
+ lkey = mlx5_mr_create(mng->cdev, &mng->cdev->mr_scache, &entry,
+ mng->addr);
+ if (lkey == UINT32_MAX)
+ res->result = -rte_errno;
+ return rte_mp_reply(&mp_res, peer);
case MLX5_MP_REQ_MEMPOOL_REGISTER:
- mlx5_mp_id_init(&mp_id, param->port_id);
- mp_init_msg(&mp_id, &mp_res, param->type);
- mpr = &param->args.mempool_reg;
- res->result = mlx5_mr_mempool_register(mpr->share_cache,
- mpr->pd, mpr->mempool,
- NULL);
+ mp_init_port_agnostic_msg(&mp_res, param->type);
+ res->result = mlx5_mr_mempool_register(mng->cdev, mng->mempool);
return rte_mp_reply(&mp_res, peer);
case MLX5_MP_REQ_MEMPOOL_UNREGISTER:
- mlx5_mp_id_init(&mp_id, param->port_id);
- mp_init_msg(&mp_id, &mp_res, param->type);
- mpr = &param->args.mempool_reg;
- res->result = mlx5_mr_mempool_unregister(mpr->share_cache,
- mpr->mempool, NULL);
+ mp_init_port_agnostic_msg(&mp_res, param->type);
+ res->result = mlx5_mr_mempool_unregister(mng->cdev,
+ mng->mempool);
return rte_mp_reply(&mp_res, peer);
default:
return 1;
struct rte_eth_dev *dev;
struct mlx5_priv *priv;
struct mlx5_common_device *cdev;
- struct mr_cache_entry entry;
- uint32_t lkey;
int ret;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
dev = &rte_eth_devices[param->port_id];
priv = dev->data->dev_private;
cdev = priv->sh->cdev;
switch (param->type) {
- case MLX5_MP_REQ_CREATE_MR:
- mp_init_msg(&priv->mp_id, &mp_res, param->type);
- lkey = mlx5_mr_create_primary(cdev->pd, &cdev->mr_scache,
- &entry, param->args.addr,
- cdev->config.mr_ext_memseg_en);
- if (lkey == UINT32_MAX)
- res->result = -rte_errno;
- ret = rte_mp_reply(&mp_res, peer);
- break;
case MLX5_MP_REQ_VERBS_CMD_FD:
mp_init_msg(&priv->mp_id, &mp_res, param->type);
mp_res.num_fds = 1;
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
- priv->sh->cdev->pd, mp, &priv->mp_id);
+ ret = mlx5_mr_mempool_register(priv->sh->cdev, mp);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
* 0 on success, (-1) on failure and rte_errno is set.
*/
static int
-mlx5_rxq_mempool_register(struct rte_eth_dev *dev,
- struct mlx5_rxq_ctrl *rxq_ctrl)
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = rxq_ctrl->sh;
struct rte_mempool *mp;
uint32_t s;
int ret = 0;
}
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
mp = rxq_ctrl->rxq.rxseg[s].mp;
- ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache,
- sh->cdev->pd, mp, &priv->mp_id);
+ ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp);
if (ret < 0 && rte_errno != EEXIST)
return ret;
rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
* the implicit registration is enabled or not,
* Rx mempool destruction is tracked to free MRs.
*/
- if (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)
+ if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
return -rte_errno;
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
#endif
-/**
- * Query LKey from a packet buffer for Tx.
- *
- * @param txq
- * Pointer to Tx queue structure.
- * @param mb
- * Pointer to mbuf.
- *
- * @return
- * Searched LKey on success, UINT32_MAX on no match.
- */
-static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
- struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
-
- /* Take slower bottom-half on miss. */
- return mlx5_mr_mb2mr(mr_ctrl, mb, &txq_ctrl->priv->mp_id);
-}
-
/**
* Ring TX queue doorbell and flush the update if requested.
*
{
MLX5_ASSERT(len);
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+ dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
}
MLX5_ASSERT(len);
if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+ dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
return;
struct mlx5_klm klm;
klm.byte_count = rte_pktmbuf_data_len(op->mbuf);
- klm.mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->mbuf, 0);
+ klm.mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->mbuf);
klm.address = rte_pktmbuf_mtod(op->mbuf, uintptr_t);
__prep_one(priv, qp_obj, op, job, qp_obj->pi, &klm);
qp_obj->db_pi = qp_obj->pi;
addr = rte_pktmbuf_mtod(mbuf, uintptr_t);
/* Build indirect mkey seg's KLM. */
mkey_klm->mkey = mlx5_mr_mb2mr(&qp->mr_ctrl,
- mbuf, 0);
+ mbuf);
mkey_klm->address = rte_cpu_to_be_64(addr);
mkey_klm->byte_count = rte_cpu_to_be_32
(rte_pktmbuf_data_len(mbuf));
klm.byte_count = scatter_size;
} else {
/* The single mbuf case. Build the KLM directly. */
- klm.mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf, 0);
+ klm.mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
klm.address = rte_pktmbuf_mtod(mbuf, uintptr_t);
klm.byte_count = rte_pktmbuf_data_len(mbuf);
}