mlx5_glue = NULL;
}
+/**
+ * Allocate Protection Domain object and extract its pdn using DV API.
+ *
+ * @param[out] cdev
+ * Pointer to the mlx5 device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+int
+mlx5_os_pd_create(struct mlx5_common_device *cdev)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5dv_obj obj;
+ struct mlx5dv_pd pd_info;
+ int ret;
+#endif
+
+ cdev->pd = mlx5_glue->alloc_pd(cdev->ctx);
+ if (cdev->pd == NULL) {
+ DRV_LOG(ERR, "Failed to allocate PD.");
+ return errno ? -errno : -ENOMEM;
+ }
+ if (cdev->config.devx == 0)
+ return 0;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ obj.pd.in = cdev->pd;
+ obj.pd.out = &pd_info;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Fail to get PD object info.");
+ mlx5_glue->dealloc_pd(cdev->pd);
+ cdev->pd = NULL;
+ return -errno;
+ }
+ cdev->pdn = pd_info.pdn;
+ return 0;
+#else
+ DRV_LOG(ERR, "Cannot get pdn - no DV support.");
+ return -ENOTSUP;
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+}
+
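With the PD now owned by the common device, class drivers are expected to consume it through the shared context instead of allocating their own. A minimal sketch of the intended usage, assuming a driver private structure that keeps a cdev pointer (only cdev->pd, cdev->pdn and the mlx5_devx_qp_attr.pd field come from this patch; the surrounding names are illustrative):

	/* Verbs PD handle, e.g. for memory registration (sketch). */
	void *pd = priv->cdev->pd;
	/* PD number for DevX object creation (sketch). */
	struct mlx5_devx_qp_attr qp_attr = {
		.pd = priv->cdev->pdn,
	};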
static struct ibv_device *
mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
{
#endif
}
-__rte_internal
-static inline void *
-mlx5_os_alloc_pd(void *ctx)
-{
- return mlx5_glue->alloc_pd(ctx);
-}
-
-__rte_internal
static inline int
mlx5_os_dealloc_pd(void *pd)
{
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
+ if (cdev->pd != NULL) {
+ claim_zero(mlx5_os_dealloc_pd(cdev->pd));
+ cdev->pd = NULL;
+ }
if (cdev->ctx != NULL) {
claim_zero(mlx5_glue->close_device(cdev->ctx));
cdev->ctx = NULL;
ret = mlx5_os_open_device(cdev, classes);
if (ret < 0)
return ret;
+ /* Allocate Protection Domain object and extract its pdn. */
+ ret = mlx5_os_pd_create(cdev);
+ if (ret)
+ goto error;
return 0;
+error:
+ mlx5_dev_hw_global_release(cdev);
+ return ret;
}
static void
TAILQ_ENTRY(mlx5_common_device) next;
uint32_t classes_loaded;
void *ctx; /* Verbs/DV/DevX context. */
+ void *pd; /* Protection Domain. */
+ uint32_t pdn; /* Protection Domain Number. */
struct mlx5_common_dev_config config; /* Device configuration. */
};
/* mlx5_common_os.c */
int mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes);
+int mlx5_os_pd_create(struct mlx5_common_device *cdev);
#endif /* RTE_PMD_MLX5_COMMON_H_ */
mlx5_nl_vlan_vmwa_create; # WINDOWS_NO_EXPORT
mlx5_nl_vlan_vmwa_delete; # WINDOWS_NO_EXPORT
- mlx5_os_alloc_pd;
- mlx5_os_dealloc_pd;
mlx5_os_dereg_mr;
mlx5_os_reg_mr;
mlx5_os_umem_dereg;
{
}
-/**
- * Allocate PD. Given a DevX context object
- * return an mlx5-pd object.
- *
- * @param[in] ctx
- * Pointer to context.
- *
- * @return
- * The mlx5_pd if pd is valid, NULL and errno otherwise.
- */
-void *
-mlx5_os_alloc_pd(void *ctx)
-{
- struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_pd),
- 0, SOCKET_ID_ANY);
- if (!ppd)
- return NULL;
-
- struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(ctx);
- if (!obj) {
- mlx5_free(ppd);
- return NULL;
- }
- ppd->obj = obj;
- ppd->pdn = obj->id;
- ppd->devx_ctx = ctx;
- return ppd;
-}
-
/**
* Release PD. Releases a given mlx5_pd object
*
return 0;
}
+/**
+ * Allocate Protection Domain object and extract its pdn using DevX API.
+ *
+ * @param[out] cdev
+ * Pointer to the mlx5 device.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_os_pd_create(struct mlx5_common_device *cdev)
+{
+ struct mlx5_pd *pd;
+
+ pd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pd), 0, SOCKET_ID_ANY);
+ if (!pd)
+ return -1;
+ struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(cdev->ctx);
+ if (!obj) {
+ mlx5_free(pd);
+ return -1;
+ }
+ pd->obj = obj;
+ pd->pdn = obj->id;
+ pd->devx_ctx = cdev->ctx;
+ cdev->pd = pd;
+ cdev->pdn = pd->pdn;
+ return 0;
+}
+
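On Windows the PD is a DevX object wrapped by struct mlx5_pd. For reference, the fields used above (obj, pdn, devx_ctx) are assumed to be laid out roughly as follows; this is a sketch, not the authoritative definition from the common Windows headers:

	struct mlx5_pd {
		void *obj;      /* DevX PD object from mlx5_devx_cmd_alloc_pd(). */
		uint32_t pdn;   /* Protection Domain number reported by the object. */
		void *devx_ctx; /* DevX context the PD was created on. */
	};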
/**
* Detect if a devx_device_bdf object has identical DBDF values to the
* rte_pci_addr found in bus/pci probing.
return -ENOTSUP;
}
-__rte_internal
-void *mlx5_os_alloc_pd(void *ctx);
-__rte_internal
int mlx5_os_dealloc_pd(void *pd);
__rte_internal
void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
struct rte_compressdev *compressdev;
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
void *uar;
- uint32_t pdn; /* Protection Domain number. */
uint8_t min_block_size;
uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
/* Minimum huffman block size supported by the device. */
- struct ibv_pd *pd;
struct rte_compressdev_config dev_config;
LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
rte_spinlock_t xform_sl;
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
};
struct mlx5_devx_qp_attr qp_attr = {
- .pd = priv->pdn,
+ .pd = priv->cdev->pdn,
.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar),
.user_index = qp_id,
};
qp->priv = priv;
qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
RTE_CACHE_LINE_SIZE);
- if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
+ if (mlx5_common_verbs_reg_mr(priv->cdev->pd, opaq_buf, qp->entries_n *
sizeof(struct mlx5_gga_compress_opaque),
&qp->opaque_mr) != 0) {
rte_free(opaq_buf);
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(ol_flags & EXT_ATTACHED_MBUF));
+ return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+ addr, !!(ol_flags & EXT_ATTACHED_MBUF));
}
static __rte_always_inline uint32_t
}
static void
-mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
+mlx5_compress_uar_release(struct mlx5_compress_priv *priv)
{
- if (priv->pd != NULL) {
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
- priv->pd = NULL;
- }
if (priv->uar != NULL) {
mlx5_glue->devx_free_uar(priv->uar);
priv->uar = NULL;
}
static int
-mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
+mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret;
-
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (priv->pd == NULL) {
- DRV_LOG(ERR, "Failed to allocate PD.");
- return errno ? -errno : -ENOMEM;
- }
- obj.pd.in = priv->pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret != 0) {
- DRV_LOG(ERR, "Fail to get PD object info.");
- mlx5_glue->dealloc_pd(priv->pd);
- priv->pd = NULL;
- return -errno;
- }
- priv->pdn = pd_info.pdn;
- return 0;
-#else
- (void)priv;
- DRV_LOG(ERR, "Cannot get pdn - no DV support.");
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
-static int
-mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
-{
- if (mlx5_compress_pd_create(priv) != 0)
- return -1;
priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
NULL) {
rte_errno = errno;
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
DRV_LOG(ERR, "Failed to allocate UAR.");
return -1;
}
priv->compressdev = compressdev;
priv->min_block_size = att.compress_min_block_size;
priv->qp_ts_format = att.qp_ts_format;
- if (mlx5_compress_hw_global_prepare(priv) != 0) {
+ if (mlx5_compress_uar_prepare(priv) != 0) {
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
}
if (mlx5_mr_btree_init(&priv->mr_scache.cache,
MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
- mlx5_compress_hw_global_release(priv);
+ mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
rte_errno = ENOMEM;
return -rte_errno;
rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
NULL);
mlx5_mr_release_cache(&priv->mr_scache);
- mlx5_compress_hw_global_release(priv);
+ mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
}
return 0;
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(ol_flags & EXT_ATTACHED_MBUF));
+ return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+ addr, !!(ol_flags & EXT_ATTACHED_MBUF));
}
static __rte_always_inline uint32_t
struct mlx5_umr_wqe *umr;
uint32_t i;
struct mlx5_devx_mkey_attr attr = {
- .pd = priv->pdn,
+ .pd = priv->cdev->pdn,
.umr_en = 1,
.crypto_en = 1,
.set_remote_rw = 1,
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
- attr.pd = priv->pdn;
+ attr.pd = priv->cdev->pdn;
attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
attr.cqn = qp->cq_obj.cq->id;
attr.rq_size = 0;
};
static void
-mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
+mlx5_crypto_uar_release(struct mlx5_crypto_priv *priv)
{
- if (priv->pd != NULL) {
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
- priv->pd = NULL;
- }
if (priv->uar != NULL) {
mlx5_glue->devx_free_uar(priv->uar);
priv->uar = NULL;
}
static int
-mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv)
+mlx5_crypto_uar_prepare(struct mlx5_crypto_priv *priv)
{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret;
-
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (priv->pd == NULL) {
- DRV_LOG(ERR, "Failed to allocate PD.");
- return errno ? -errno : -ENOMEM;
- }
- obj.pd.in = priv->pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret != 0) {
- DRV_LOG(ERR, "Fail to get PD object info.");
- mlx5_glue->dealloc_pd(priv->pd);
- priv->pd = NULL;
- return -errno;
- }
- priv->pdn = pd_info.pdn;
- return 0;
-#else
- (void)priv;
- DRV_LOG(ERR, "Cannot get pdn - no DV support.");
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
-static int
-mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv)
-{
- if (mlx5_crypto_pd_create(priv) != 0)
- return -1;
priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (priv->uar)
priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
if (priv->uar == NULL || priv->uar_addr == NULL) {
rte_errno = errno;
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
DRV_LOG(ERR, "Failed to allocate UAR.");
return -1;
}
priv->login_obj = login;
priv->crypto_dev = crypto_dev;
priv->qp_ts_format = attr.qp_ts_format;
- if (mlx5_crypto_hw_global_prepare(priv) != 0) {
+ if (mlx5_crypto_uar_prepare(priv) != 0) {
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
if (mlx5_mr_btree_init(&priv->mr_scache.cache,
MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
- mlx5_crypto_hw_global_release(priv);
+ mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
rte_errno = ENOMEM;
return -rte_errno;
rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
NULL);
mlx5_mr_release_cache(&priv->mr_scache);
- mlx5_crypto_hw_global_release(priv);
+ mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
}
struct rte_cryptodev *crypto_dev;
void *uar; /* User Access Region. */
volatile uint64_t *uar_addr;
- uint32_t pdn; /* Protection Domain number. */
uint32_t max_segs_num; /* Maximum supported data segs. */
uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
- struct ibv_pd *pd;
struct mlx5_hlist *dek_hlist; /* Dek hash list. */
struct rte_cryptodev_config dev_config;
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
struct mlx5_crypto_dek *dek = rte_zmalloc(__func__, sizeof(*dek),
RTE_CACHE_LINE_SIZE);
struct mlx5_devx_dek_attr dek_attr = {
- .pd = ctx->priv->pdn,
+ .pd = ctx->priv->cdev->pdn,
.key_purpose = MLX5_CRYPTO_KEY_PURPOSE_AES_XTS,
.has_keytag = 1,
};
switch (param->type) {
case MLX5_MP_REQ_CREATE_MR:
mp_init_msg(&priv->mp_id, &mp_res, param->type);
- lkey = mlx5_mr_create_primary(priv->sh->pd,
+ lkey = mlx5_mr_create_primary(cdev->pd,
&priv->sh->share_cache,
&entry, param->args.addr,
cdev->config.mr_ext_memseg_en);
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
.cq = cq,
});
if (wq) {
return mlx5_os_auxiliary_probe(cdev);
}
-/**
- * Extract pdn of PD object using DV API.
- *
- * @param[in] pd
- * Pointer to the verbs PD object.
- * @param[out] pdn
- * Pointer to the PD object number variable.
- *
- * @return
- * 0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(DEBUG, "Fail to get PD object info");
- return ret;
- }
- *pdn = pd_info.pdn;
- return 0;
-#else
- (void)pd;
- (void)pdn;
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
/**
* Install shared asynchronous device events handler.
* This function is implemented to support event sharing
.max_wr = wqe_n >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
.cq = rxq_obj->ibv_cq,
.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
.create_flags = (rxq_data->vlan_strip ?
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
},
&qp_init_attr);
#else
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
});
#endif
if (!qp) {
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
- .pd = priv->sh->pd,
+ .pd = priv->sh->cdev->pd,
.cq = rxq->ibv_cq,
});
if (!rxq->wq) {
.rx_hash_fields_mask = 0,
},
.rwq_ind_tbl = ind_tbl,
- .pd = priv->sh->pd
+ .pd = priv->sh->cdev->pd
});
if (!hrxq->qp) {
DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
qp_attr.qp_type = IBV_QPT_RAW_PACKET,
/* Do *NOT* enable this, completions events are managed per Tx burst. */
qp_attr.sq_sig_all = 0;
- qp_attr.pd = priv->sh->pd;
+ qp_attr.pd = priv->sh->cdev->pd;
qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
if (txq_data->inlen_send)
qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask = IBV_QP_INIT_ATTR_PD,
- .pd = sh->pd,
+ .pd = sh->cdev->pd,
.send_cq = sh->self_lb.ibv_cq,
.recv_cq = sh->self_lb.ibv_cq,
.cap.max_recv_wr = 1,
mlx5_mp_id_init(&mp_id, 0);
if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
- mp->name, sh->pd, rte_strerror(rte_errno));
+ mp->name, sh->cdev->pd, rte_strerror(rte_errno));
}
/**
int ret;
mlx5_mp_id_init(&mp_id, 0);
- ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
+ ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
+ &mp_id);
if (ret < 0 && rte_errno != EEXIST)
DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
- mp->name, sh->pd, rte_strerror(rte_errno));
+ mp->name, sh->cdev->pd, rte_strerror(rte_errno));
}
/**
switch (event) {
case RTE_MEMPOOL_EVENT_READY:
mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
+ if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
&mp_id) < 0)
DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
- mp->name, sh->pd, rte_strerror(rte_errno));
+ mp->name, sh->cdev->pd,
+ rte_strerror(rte_errno));
break;
case RTE_MEMPOOL_EVENT_DESTROY:
mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
}
- sh->pd = mlx5_os_alloc_pd(sh->cdev->ctx);
- if (sh->pd == NULL) {
- DRV_LOG(ERR, "PD allocation failure");
- err = ENOMEM;
- goto error;
- }
if (sh->devx) {
- err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
- if (err) {
- DRV_LOG(ERR, "Fail to extract pdn from PD");
- goto error;
- }
sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
if (!sh->td) {
DRV_LOG(ERR, "TD allocation failure");
mlx5_glue->devx_free_uar(sh->devx_rx_uar);
if (sh->tx_uar)
mlx5_glue->devx_free_uar(sh->tx_uar);
- if (sh->pd)
- claim_zero(mlx5_os_dealloc_pd(sh->pd));
mlx5_free(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
mlx5_glue->devx_free_uar(sh->tx_uar);
sh->tx_uar = NULL;
}
- if (sh->pd)
- claim_zero(mlx5_os_dealloc_pd(sh->pd));
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
uint32_t max_port; /* Maximal IB device port index. */
struct mlx5_bond_info bond; /* Bonding information. */
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
- void *pd; /* Protection Domain. */
- uint32_t pdn; /* Protection Domain number. */
uint32_t tdn; /* Transport Domain number. */
char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
struct rte_pci_driver;
int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
-int mlx5_os_get_pdn(void *pd, uint32_t *pdn);
int mlx5_os_net_probe(struct mlx5_common_device *cdev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
MLX5_WQ_END_PAD_MODE_ALIGN :
MLX5_WQ_END_PAD_MODE_NONE;
- rq_attr.wq_attr.pd = priv->sh->pdn;
+ rq_attr.wq_attr.pd = priv->sh->cdev->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
/* Create RQ using DevX API. */
return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
.tis_lst_sz = 1,
.tis_num = priv->sh->tis->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = priv->sh->pdn,
+ .pd = priv->sh->cdev->pdn,
.uar_page =
mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
},
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
+ mkey_attr.pd = sh->cdev->pdn;
mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
return -1;
}
- ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+ ret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr);
if (ret) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
mlx5_free(mr->addr);
sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
return -1;
if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ sh->sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
return -1;
}
break;
case ASO_OPC_MOD_POLICER:
if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format))
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ sh->sq_ts_format))
return -1;
mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
break;
&sh->ct_mng->aso_sq.mr, 0))
return -1;
if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
- sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
- sh->sq_ts_format)) {
+ sh->tx_uar, cdev->pdn,
+ MLX5_ASO_QUEUE_LOG_DESC,
+ sh->sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
return -1;
}
* NULL otherwise and rte_errno is set.
*/
static struct mlx5_aso_mtr_pool *
-flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
- struct mlx5_aso_mtr **mtr_free)
+flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_aso_mtr_pools_mng *pools_mng =
- &priv->sh->mtrmng->pools_mng;
+ struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
struct mlx5_aso_mtr_pool *pool = NULL;
struct mlx5_devx_obj *dcs = NULL;
uint32_t i;
log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
- priv->sh->pdn, log_obj_size);
+ priv->sh->cdev->pdn,
+ log_obj_size);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
pools_mng->n_valid++;
for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
pool->mtrs[i].offset = i;
- LIST_INSERT_HEAD(&pools_mng->meters,
- &pool->mtrs[i], next);
+ LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
}
pool->mtrs[0].offset = 0;
*mtr_free = &pool->mtrs[0];
uint32_t i;
obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
- priv->sh->pdn);
+ priv->sh->cdev->pdn);
if (!obj) {
rte_errno = ENODATA;
DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
- priv->sh->pdn, log_obj_size);
+ priv->sh->cdev->pdn,
+ log_obj_size);
if (!obj) {
rte_errno = ENODATA;
DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
struct mlx5_priv *priv = txq_ctrl->priv;
- return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+ return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
&priv->sh->share_cache, mr_ctrl, addr,
priv->sh->cdev->config.mr_ext_memseg_en);
}
return;
DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
dev->data->port_id, mem_idx, mp->name);
- mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+ mr = mlx5_create_mr_ext(sh->cdev->pd, addr, len, mp->socket_id,
sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
mlx5_mr_insert_cache(&sh->share_cache, mr);
rte_rwlock_write_unlock(&sh->share_cache.rwlock);
/* Insert to the local cache table */
- mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache, mr_ctrl,
- addr, priv->sh->cdev->config.mr_ext_memseg_en);
+ mlx5_mr_addr2mr_bh(sh->cdev->pd, &priv->mp_id, &sh->share_cache,
+ mr_ctrl, addr, sh->cdev->config.mr_ext_memseg_en);
}
/**
}
priv = dev->data->dev_private;
sh = priv->sh;
- mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
- sh->share_cache.reg_mr_cb);
+ mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,
+ SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to dma map", dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(&priv->sh->share_cache, priv->sh->pd,
- mp, &priv->mp_id);
+ ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+ priv->sh->cdev->pd, mp, &priv->mp_id);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
mp = rxq_ctrl->rxq.rxseg[s].mp;
ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
- priv->sh->pd, mp, &priv->mp_id);
+ priv->sh->cdev->pd, mp,
+ &priv->mp_id);
if (ret < 0 && rte_errno != EEXIST)
return ret;
rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
.tis_lst_sz = 1,
.tis_num = sh->tis->id,
.wq_attr = (struct mlx5_devx_wq_attr){
- .pd = sh->pdn,
+ .pd = sh->cdev->pdn,
.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
},
.ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
sq_attr.wq_attr.cd_slave = 1;
sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
- sq_attr.wq_attr.pd = sh->pdn;
+ sq_attr.wq_attr.pd = sh->cdev->pdn;
sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
log2above(wq->sq_size),
*dereg_mr_cb = mlx5_os_dereg_mr;
}
-/**
- * Extract pdn of PD object using DevX
- *
- * @param[in] pd
- * Pointer to the DevX PD object.
- * @param[out] pdn
- * Pointer to the PD object number variable.
- *
- * @return
- * 0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
- if (!pd)
- return -EINVAL;
-
- *pdn = ((struct mlx5_pd *)pd)->pdn;
- return 0;
-}
-
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
rte_errno = ENOMEM;
goto error;
}
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (!priv->pd) {
- DRV_LOG(ERR, "can't allocate pd.");
- rte_errno = ENOMEM;
- goto error;
- }
priv->regexdev->dev_ops = &mlx5_regexdev_ops;
priv->regexdev->enqueue = mlx5_regexdev_enqueue;
#ifdef HAVE_MLX5_UMR_IMKEY
return 0;
error:
- if (priv->pd)
- mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
NULL);
if (priv->mr_scache.cache.table)
mlx5_mr_release_cache(&priv->mr_scache);
- if (priv->pd)
- mlx5_glue->dealloc_pd(priv->pd);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
MLX5_RXP_EM_COUNT];
uint32_t nb_engines; /* Number of RegEx engines. */
struct mlx5dv_devx_uar *uar; /* UAR object. */
- struct ibv_pd *pd;
TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
/**< Called by memory event callback. */
struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
uint32_t mmo_regex_sq_cap:1;
};
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-static inline int
-regex_get_pdn(void *pd, uint32_t *pdn)
-{
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(DEBUG, "Fail to get PD object info");
- return ret;
- }
- *pdn = pd_info.pdn;
- return 0;
-}
-#endif
-
/* mlx5_regex.c */
int mlx5_regex_start(struct rte_regexdev *dev);
int mlx5_regex_stop(struct rte_regexdev *dev);
struct mlx5_devx_qp_attr attr = {
.cqn = qp->cq.cq_obj.cq->id,
.uar_index = priv->uar->page_id,
+ .pd = priv->cdev->pdn,
.ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
.user_index = q_ind,
};
struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
- uint32_t pd_num = 0;
int ret;
qp_obj->log_nb_desc = log_nb_desc;
qp_obj->qpn = q_ind;
qp_obj->ci = 0;
qp_obj->pi = 0;
- ret = regex_get_pdn(priv->pd, &pd_num);
- if (ret)
- return ret;
- attr.pd = pd_num;
attr.rq_size = 0;
attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
log_nb_desc));
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
- !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+ return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+ addr, !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
}
static int
setup_buffers(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp)
{
- struct ibv_pd *pd = priv->pd;
+ struct ibv_pd *pd = priv->cdev->pd;
uint32_t i;
int err;
if (priv->has_umr) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- if (regex_get_pdn(priv->pd, &attr.pd)) {
- err = -rte_errno;
- DRV_LOG(ERR, "Failed to get pdn.");
- mlx5_regexdev_teardown_fastpath(priv, qp_id);
- return err;
- }
+ attr.pd = priv->cdev->pdn;
#endif
for (i = 0; i < qp->nb_desc; i++) {
attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
return 0;
}
-static int
-mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
- if (priv->pd == NULL) {
- DRV_LOG(ERR, "Failed to allocate PD.");
- return errno ? -errno : -ENOMEM;
- }
- struct mlx5dv_obj obj;
- struct mlx5dv_pd pd_info;
- int ret = 0;
-
- obj.pd.in = priv->pd;
- obj.pd.out = &pd_info;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
- if (ret) {
- DRV_LOG(ERR, "Fail to get PD object info.");
- mlx5_glue->dealloc_pd(priv->pd);
- priv->pd = NULL;
- return -errno;
- }
- priv->pdn = pd_info.pdn;
- return 0;
-#else
- (void)priv;
- DRV_LOG(ERR, "Cannot get pdn - no DV support.");
- return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
static int
mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
{
mlx5_vdpa_virtqs_release(priv);
mlx5_vdpa_event_qp_global_release(priv);
mlx5_vdpa_mem_dereg(priv);
- if (priv->pd) {
- claim_zero(mlx5_glue->dealloc_pd(priv->pd));
- priv->pd = NULL;
- }
priv->configured = 0;
priv->vid = 0;
/* The mutex may stay locked after event thread cancel - initiate it. */
if (mlx5_vdpa_mtu_set(priv))
DRV_LOG(WARNING, "MTU cannot be set on device %s.",
vdev->device->name);
- if (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||
- mlx5_vdpa_err_event_setup(priv) ||
+ if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_err_event_setup(priv) ||
mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
mlx5_vdpa_cqe_event_setup(priv)) {
mlx5_vdpa_dev_close(vid);
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
int vid; /* vhost device id. */
struct mlx5_hca_vdpa_attr caps;
- uint32_t pdn; /* Protection Domain number. */
- struct ibv_pd *pd;
uint32_t gpa_mkey_index;
struct ibv_mr *null_mr;
struct rte_vhost_memory *vmem;
return -1;
if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
return -1;
- attr.pd = priv->pdn;
+ attr.pd = priv->cdev->pdn;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
if (!eqp->fw_qp) {
struct mlx5_devx_mkey_attr mkey_attr = {
.addr = (uintptr_t)log_base,
.size = log_size,
- .pd = priv->pdn,
+ .pd = priv->cdev->pdn,
.pg_access = 1,
};
struct mlx5_devx_virtq_attr attr = {
if (!mem)
return -rte_errno;
priv->vmem = mem;
- priv->null_mr = mlx5_glue->alloc_null_mr(priv->pd);
+ priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
if (!priv->null_mr) {
DRV_LOG(ERR, "Failed to allocate null MR.");
ret = -errno;
mkey_attr.addr = (uintptr_t)(reg->guest_phys_addr);
mkey_attr.size = reg->size;
mkey_attr.umem_id = entry->umem->umem_id;
- mkey_attr.pd = priv->pdn;
+ mkey_attr.pd = priv->cdev->pdn;
mkey_attr.pg_access = 1;
entry->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx,
&mkey_attr);
}
mkey_attr.addr = (uintptr_t)(mem->regions[0].guest_phys_addr);
mkey_attr.size = mem_size;
- mkey_attr.pd = priv->pdn;
+ mkey_attr.pd = priv->cdev->pdn;
mkey_attr.umem_id = 0;
/* Must be zero for KLM mode. */
mkey_attr.log_entity_size = mode == MLX5_MKC_ACCESS_MODE_KLM_FBS ?
attr.mkey = priv->gpa_mkey_index;
attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
attr.queue_index = index;
- attr.pd = priv->pdn;
+ attr.pd = priv->cdev->pdn;
attr.hw_latency_mode = priv->hw_latency_mode;
attr.hw_max_latency_us = priv->hw_max_latency_us;
attr.hw_max_pending_comp = priv->hw_max_pending_comp;