git.droids-corp.org - dpdk.git/commitdiff
common/mlx5: share protection domain object
author: Michael Baum <michaelba@nvidia.com>
Tue, 19 Oct 2021 20:55:54 +0000 (23:55 +0300)
committer: Thomas Monjalon <thomas@monjalon.net>
Thu, 21 Oct 2021 13:53:46 +0000 (15:53 +0200)
Create shared Protection Domain in common area and add it and its PDN as
fields of common device structure.

Use this Protection Domain in all drivers and remove the PD and PDN
fields from their private structure.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
35 files changed:
drivers/common/mlx5/linux/mlx5_common_os.c
drivers/common/mlx5/linux/mlx5_common_os.h
drivers/common/mlx5/mlx5_common.c
drivers/common/mlx5/mlx5_common.h
drivers/common/mlx5/version.map
drivers/common/mlx5/windows/mlx5_common_os.c
drivers/common/mlx5/windows/mlx5_common_os.h
drivers/compress/mlx5/mlx5_compress.c
drivers/crypto/mlx5/mlx5_crypto.c
drivers/crypto/mlx5/mlx5_crypto.h
drivers/crypto/mlx5/mlx5_crypto_dek.c
drivers/net/mlx5/linux/mlx5_mp_os.c
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/linux/mlx5_verbs.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow_aso.c
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_mr.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_trigger.c
drivers/net/mlx5/mlx5_txpp.c
drivers/net/mlx5/windows/mlx5_os.c
drivers/regex/mlx5/mlx5_regex.c
drivers/regex/mlx5/mlx5_regex.h
drivers/regex/mlx5/mlx5_regex_control.c
drivers/regex/mlx5/mlx5_regex_fastpath.c
drivers/vdpa/mlx5/mlx5_vdpa.c
drivers/vdpa/mlx5/mlx5_vdpa.h
drivers/vdpa/mlx5/mlx5_vdpa_event.c
drivers/vdpa/mlx5/mlx5_vdpa_lm.c
drivers/vdpa/mlx5/mlx5_vdpa_mem.c
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c

index 341822cf71ccbce19f415be313e1012c5e90adaa..8db3fe790a2e8e104cfdb1604aedf1c44f527d26 100644 (file)
@@ -406,6 +406,49 @@ glue_error:
        mlx5_glue = NULL;
 }
 
+/**
+ * Allocate Protection Domain object and extract its pdn using DV API.
+ *
+ * @param[out] cdev
+ *   Pointer to the mlx5 device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_os_pd_create(struct mlx5_common_device *cdev)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       struct mlx5dv_obj obj;
+       struct mlx5dv_pd pd_info;
+       int ret;
+#endif
+
+       cdev->pd = mlx5_glue->alloc_pd(cdev->ctx);
+       if (cdev->pd == NULL) {
+               DRV_LOG(ERR, "Failed to allocate PD.");
+               return errno ? -errno : -ENOMEM;
+       }
+       if (cdev->config.devx == 0)
+               return 0;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+       obj.pd.in = cdev->pd;
+       obj.pd.out = &pd_info;
+       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
+       if (ret != 0) {
+               DRV_LOG(ERR, "Fail to get PD object info.");
+               mlx5_glue->dealloc_pd(cdev->pd);
+               cdev->pd = NULL;
+               return -errno;
+       }
+       cdev->pdn = pd_info.pdn;
+       return 0;
+#else
+       DRV_LOG(ERR, "Cannot get pdn - no DV support.");
+       return -ENOTSUP;
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+}
+
 static struct ibv_device *
 mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
 {
index 0e605c3a9ed82f91ff21f0541a62853dfbe6c26b..c2957f91eccbbb1258f1c9c8d948dddc8a69fb56 100644 (file)
@@ -203,14 +203,6 @@ mlx5_os_get_devx_uar_page_id(void *uar)
 #endif
 }
 
-__rte_internal
-static inline void *
-mlx5_os_alloc_pd(void *ctx)
-{
-       return mlx5_glue->alloc_pd(ctx);
-}
-
-__rte_internal
 static inline int
 mlx5_os_dealloc_pd(void *pd)
 {
index 5786b5c0b973f21d111767453d0011ede19a9118..ec246c15f936d4d6d1d7e49c9ce456d017542f42 100644 (file)
@@ -320,6 +320,10 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
 static void
 mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
 {
+       if (cdev->pd != NULL) {
+               claim_zero(mlx5_os_dealloc_pd(cdev->pd));
+               cdev->pd = NULL;
+       }
        if (cdev->ctx != NULL) {
                claim_zero(mlx5_glue->close_device(cdev->ctx));
                cdev->ctx = NULL;
@@ -346,7 +350,14 @@ mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
        ret = mlx5_os_open_device(cdev, classes);
        if (ret < 0)
                return ret;
+       /* Allocate Protection Domain object and extract its pdn. */
+       ret = mlx5_os_pd_create(cdev);
+       if (ret)
+               goto error;
        return 0;
+error:
+       mlx5_dev_hw_global_release(cdev);
+       return ret;
 }
 
 static void
index 066860045a32c97f2ab75cd906389880a3079f4f..d72002ca3c2a0b08b022b6b4da569103f8a88efc 100644 (file)
@@ -346,6 +346,8 @@ struct mlx5_common_device {
        TAILQ_ENTRY(mlx5_common_device) next;
        uint32_t classes_loaded;
        void *ctx; /* Verbs/DV/DevX context. */
+       void *pd; /* Protection Domain. */
+       uint32_t pdn; /* Protection Domain Number. */
        struct mlx5_common_dev_config config; /* Device configuration. */
 };
 
@@ -447,5 +449,6 @@ mlx5_dev_is_pci(const struct rte_device *dev);
 /* mlx5_common_os.c */
 
 int mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes);
+int mlx5_os_pd_create(struct mlx5_common_device *cdev);
 
 #endif /* RTE_PMD_MLX5_COMMON_H_ */
index 24925fc4e45c95bd016cf40f0ef64a0098134d3b..44c4593888c341c7d27b015b9f49bde109cac706 100644 (file)
@@ -135,8 +135,6 @@ INTERNAL {
        mlx5_nl_vlan_vmwa_create; # WINDOWS_NO_EXPORT
        mlx5_nl_vlan_vmwa_delete; # WINDOWS_NO_EXPORT
 
-       mlx5_os_alloc_pd;
-       mlx5_os_dealloc_pd;
        mlx5_os_dereg_mr;
        mlx5_os_reg_mr;
        mlx5_os_umem_dereg;
index b7178cbbcf01f61ef8bf9a3e6c980356da6ee07c..4d0f1e92e365c81e92e6f9a30d8ca1ed060af04c 100644 (file)
@@ -25,35 +25,6 @@ mlx5_glue_constructor(void)
 {
 }
 
-/**
- * Allocate PD. Given a DevX context object
- * return an mlx5-pd object.
- *
- * @param[in] ctx
- *   Pointer to context.
- *
- * @return
- *    The mlx5_pd if pd is valid, NULL and errno otherwise.
- */
-void *
-mlx5_os_alloc_pd(void *ctx)
-{
-       struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_pd),
-                                         0, SOCKET_ID_ANY);
-       if (!ppd)
-               return NULL;
-
-       struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(ctx);
-       if (!obj) {
-               mlx5_free(ppd);
-               return NULL;
-       }
-       ppd->obj = obj;
-       ppd->pdn = obj->id;
-       ppd->devx_ctx = ctx;
-       return ppd;
-}
-
 /**
  * Release PD. Releases a given mlx5_pd object
  *
@@ -73,6 +44,36 @@ mlx5_os_dealloc_pd(void *pd)
        return 0;
 }
 
+/**
+ * Allocate Protection Domain object and extract its pdn using DV API.
+ *
+ * @param[out] dev
+ *   Pointer to the mlx5 device.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_os_pd_create(struct mlx5_common_device *cdev)
+{
+       struct mlx5_pd *pd;
+
+       pd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pd), 0, SOCKET_ID_ANY);
+       if (!pd)
+               return -1;
+       struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(cdev->ctx);
+       if (!obj) {
+               mlx5_free(pd);
+               return -1;
+       }
+       pd->obj = obj;
+       pd->pdn = obj->id;
+       pd->devx_ctx = cdev->ctx;
+       cdev->pd = pd;
+       cdev->pdn = pd->pdn;
+       return 0;
+}
+
 /**
  * Detect if a devx_device_bdf object has identical DBDF values to the
  * rte_pci_addr found in bus/pci probing.
index 3756e1959b408add0341873e1e795f4d48cdfebe..c99645aefdcfb9e9cdaad15aea4ef6c4c92198c6 100644 (file)
@@ -248,9 +248,6 @@ mlx5_os_devx_subscribe_devx_event(void *eventc,
        return -ENOTSUP;
 }
 
-__rte_internal
-void *mlx5_os_alloc_pd(void *ctx);
-__rte_internal
 int mlx5_os_dealloc_pd(void *pd);
 __rte_internal
 void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
index 246a9c994b6794aa4a97247b5c3ce8a1a01e151a..4c8e67c4df89707368edafb11b82893a47078800 100644 (file)
@@ -38,11 +38,9 @@ struct mlx5_compress_priv {
        struct rte_compressdev *compressdev;
        struct mlx5_common_device *cdev; /* Backend mlx5 device. */
        void *uar;
-       uint32_t pdn; /* Protection Domain number. */
        uint8_t min_block_size;
        uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
        /* Minimum huffman block size supported by the device. */
-       struct ibv_pd *pd;
        struct rte_compressdev_config dev_config;
        LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
        rte_spinlock_t xform_sl;
@@ -190,7 +188,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
        };
        struct mlx5_devx_qp_attr qp_attr = {
-               .pd = priv->pdn,
+               .pd = priv->cdev->pdn,
                .uar_index = mlx5_os_get_devx_uar_page_id(priv->uar),
                .user_index = qp_id,
        };
@@ -230,7 +228,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
        qp->priv = priv;
        qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
                                                   RTE_CACHE_LINE_SIZE);
-       if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
+       if (mlx5_common_verbs_reg_mr(priv->cdev->pd, opaq_buf, qp->entries_n *
                                        sizeof(struct mlx5_gga_compress_opaque),
                                                         &qp->opaque_mr) != 0) {
                rte_free(opaq_buf);
@@ -469,8 +467,8 @@ mlx5_compress_addr2mr(struct mlx5_compress_priv *priv, uintptr_t addr,
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take slower bottom-half on miss. */
-       return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
-                                 !!(ol_flags & EXT_ATTACHED_MBUF));
+       return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+                                 addr, !!(ol_flags & EXT_ATTACHED_MBUF));
 }
 
 static __rte_always_inline uint32_t
@@ -691,12 +689,8 @@ mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
 }
 
 static void
-mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
+mlx5_compress_uar_release(struct mlx5_compress_priv *priv)
 {
-       if (priv->pd != NULL) {
-               claim_zero(mlx5_glue->dealloc_pd(priv->pd));
-               priv->pd = NULL;
-       }
        if (priv->uar != NULL) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
@@ -704,46 +698,12 @@ mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
 }
 
 static int
-mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
+mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv)
 {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       struct mlx5dv_obj obj;
-       struct mlx5dv_pd pd_info;
-       int ret;
-
-       priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
-       if (priv->pd == NULL) {
-               DRV_LOG(ERR, "Failed to allocate PD.");
-               return errno ? -errno : -ENOMEM;
-       }
-       obj.pd.in = priv->pd;
-       obj.pd.out = &pd_info;
-       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-       if (ret != 0) {
-               DRV_LOG(ERR, "Fail to get PD object info.");
-               mlx5_glue->dealloc_pd(priv->pd);
-               priv->pd = NULL;
-               return -errno;
-       }
-       priv->pdn = pd_info.pdn;
-       return 0;
-#else
-       (void)priv;
-       DRV_LOG(ERR, "Cannot get pdn - no DV support.");
-       return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
-static int
-mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
-{
-       if (mlx5_compress_pd_create(priv) != 0)
-               return -1;
        priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
        if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
            NULL) {
                rte_errno = errno;
-               claim_zero(mlx5_glue->dealloc_pd(priv->pd));
                DRV_LOG(ERR, "Failed to allocate UAR.");
                return -1;
        }
@@ -839,14 +799,14 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
        priv->compressdev = compressdev;
        priv->min_block_size = att.compress_min_block_size;
        priv->qp_ts_format = att.qp_ts_format;
-       if (mlx5_compress_hw_global_prepare(priv) != 0) {
+       if (mlx5_compress_uar_prepare(priv) != 0) {
                rte_compressdev_pmd_destroy(priv->compressdev);
                return -1;
        }
        if (mlx5_mr_btree_init(&priv->mr_scache.cache,
                             MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
                DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
-               mlx5_compress_hw_global_release(priv);
+               mlx5_compress_uar_release(priv);
                rte_compressdev_pmd_destroy(priv->compressdev);
                rte_errno = ENOMEM;
                return -rte_errno;
@@ -881,7 +841,7 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev)
                        rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
                                                          NULL);
                mlx5_mr_release_cache(&priv->mr_scache);
-               mlx5_compress_hw_global_release(priv);
+               mlx5_compress_uar_release(priv);
                rte_compressdev_pmd_destroy(priv->compressdev);
        }
        return 0;
index 10ac633c77d3086a487a80f150be4a0b11f5073e..b22b7836e1f129dc8ac0819fd6b075e09fa4d88f 100644 (file)
@@ -333,8 +333,8 @@ mlx5_crypto_addr2mr(struct mlx5_crypto_priv *priv, uintptr_t addr,
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take slower bottom-half on miss. */
-       return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
-                                 !!(ol_flags & EXT_ATTACHED_MBUF));
+       return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+                                 addr, !!(ol_flags & EXT_ATTACHED_MBUF));
 }
 
 static __rte_always_inline uint32_t
@@ -610,7 +610,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
        struct mlx5_umr_wqe *umr;
        uint32_t i;
        struct mlx5_devx_mkey_attr attr = {
-               .pd = priv->pdn,
+               .pd = priv->cdev->pdn,
                .umr_en = 1,
                .crypto_en = 1,
                .set_remote_rw = 1,
@@ -664,7 +664,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                DRV_LOG(ERR, "Failed to create CQ.");
                goto error;
        }
-       attr.pd = priv->pdn;
+       attr.pd = priv->cdev->pdn;
        attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
        attr.cqn = qp->cq_obj.cq->id;
        attr.rq_size = 0;
@@ -754,12 +754,8 @@ static struct rte_cryptodev_ops mlx5_crypto_ops = {
 };
 
 static void
-mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
+mlx5_crypto_uar_release(struct mlx5_crypto_priv *priv)
 {
-       if (priv->pd != NULL) {
-               claim_zero(mlx5_glue->dealloc_pd(priv->pd));
-               priv->pd = NULL;
-       }
        if (priv->uar != NULL) {
                mlx5_glue->devx_free_uar(priv->uar);
                priv->uar = NULL;
@@ -767,47 +763,13 @@ mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
 }
 
 static int
-mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv)
+mlx5_crypto_uar_prepare(struct mlx5_crypto_priv *priv)
 {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       struct mlx5dv_obj obj;
-       struct mlx5dv_pd pd_info;
-       int ret;
-
-       priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
-       if (priv->pd == NULL) {
-               DRV_LOG(ERR, "Failed to allocate PD.");
-               return errno ? -errno : -ENOMEM;
-       }
-       obj.pd.in = priv->pd;
-       obj.pd.out = &pd_info;
-       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-       if (ret != 0) {
-               DRV_LOG(ERR, "Fail to get PD object info.");
-               mlx5_glue->dealloc_pd(priv->pd);
-               priv->pd = NULL;
-               return -errno;
-       }
-       priv->pdn = pd_info.pdn;
-       return 0;
-#else
-       (void)priv;
-       DRV_LOG(ERR, "Cannot get pdn - no DV support.");
-       return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
-static int
-mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv)
-{
-       if (mlx5_crypto_pd_create(priv) != 0)
-               return -1;
        priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
        if (priv->uar)
                priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
        if (priv->uar == NULL || priv->uar_addr == NULL) {
                rte_errno = errno;
-               claim_zero(mlx5_glue->dealloc_pd(priv->pd));
                DRV_LOG(ERR, "Failed to allocate UAR.");
                return -1;
        }
@@ -1011,14 +973,14 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
        priv->login_obj = login;
        priv->crypto_dev = crypto_dev;
        priv->qp_ts_format = attr.qp_ts_format;
-       if (mlx5_crypto_hw_global_prepare(priv) != 0) {
+       if (mlx5_crypto_uar_prepare(priv) != 0) {
                rte_cryptodev_pmd_destroy(priv->crypto_dev);
                return -1;
        }
        if (mlx5_mr_btree_init(&priv->mr_scache.cache,
                             MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
                DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
-               mlx5_crypto_hw_global_release(priv);
+               mlx5_crypto_uar_release(priv);
                rte_cryptodev_pmd_destroy(priv->crypto_dev);
                rte_errno = ENOMEM;
                return -rte_errno;
@@ -1066,7 +1028,7 @@ mlx5_crypto_dev_remove(struct mlx5_common_device *cdev)
                        rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
                                                          NULL);
                mlx5_mr_release_cache(&priv->mr_scache);
-               mlx5_crypto_hw_global_release(priv);
+               mlx5_crypto_uar_release(priv);
                rte_cryptodev_pmd_destroy(priv->crypto_dev);
                claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
        }
index 14dd3b9c9a2017a4833d0fbf3b60f9123982fee0..27ae9cff2cec7edd9db4d625a3a899176c903eb6 100644 (file)
@@ -23,10 +23,8 @@ struct mlx5_crypto_priv {
        struct rte_cryptodev *crypto_dev;
        void *uar; /* User Access Region. */
        volatile uint64_t *uar_addr;
-       uint32_t pdn; /* Protection Domain number. */
        uint32_t max_segs_num; /* Maximum supported data segs. */
        uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
-       struct ibv_pd *pd;
        struct mlx5_hlist *dek_hlist; /* Dek hash list. */
        struct rte_cryptodev_config dev_config;
        struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
index 94f21ec036f123c5e4b686b48c40e8b3119dd17f..de0d2545d185d8fc107eb98c0c0ccaa34bc6fca1 100644 (file)
@@ -94,7 +94,7 @@ mlx5_crypto_dek_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
        struct mlx5_crypto_dek *dek = rte_zmalloc(__func__, sizeof(*dek),
                                                  RTE_CACHE_LINE_SIZE);
        struct mlx5_devx_dek_attr dek_attr = {
-               .pd = ctx->priv->pdn,
+               .pd = ctx->priv->cdev->pdn,
                .key_purpose = MLX5_CRYPTO_KEY_PURPOSE_AES_XTS,
                .has_keytag = 1,
        };
index 35b2dfd3b296d08a8dd986517d40237b28197491..286a7caf36085c499ceca43b844383c57fe09711 100644 (file)
@@ -90,7 +90,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
        switch (param->type) {
        case MLX5_MP_REQ_CREATE_MR:
                mp_init_msg(&priv->mp_id, &mp_res, param->type);
-               lkey = mlx5_mr_create_primary(priv->sh->pd,
+               lkey = mlx5_mr_create_primary(cdev->pd,
                                              &priv->sh->share_cache,
                                              &entry, param->args.addr,
                                              cdev->config.mr_ext_memseg_en);
index 53ed7ab4982b30ed0440e6e6f875e340cd099657..0623e7ac3d7193e9083e716eb21177c2d3a740ea 100644 (file)
@@ -785,7 +785,7 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
                                                    .wq_type = IBV_WQT_RQ,
                                                    .max_wr = 1,
                                                    .max_sge = 1,
-                                                   .pd = priv->sh->pd,
+                                                   .pd = priv->sh->cdev->pd,
                                                    .cq = cq,
                                                });
                        if (wq) {
@@ -2731,41 +2731,6 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
                return mlx5_os_auxiliary_probe(cdev);
 }
 
-/**
- * Extract pdn of PD object using DV API.
- *
- * @param[in] pd
- *   Pointer to the verbs PD object.
- * @param[out] pdn
- *   Pointer to the PD object number variable.
- *
- * @return
- *   0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       struct mlx5dv_obj obj;
-       struct mlx5dv_pd pd_info;
-       int ret = 0;
-
-       obj.pd.in = pd;
-       obj.pd.out = &pd_info;
-       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-       if (ret) {
-               DRV_LOG(DEBUG, "Fail to get PD object info");
-               return ret;
-       }
-       *pdn = pd_info.pdn;
-       return 0;
-#else
-       (void)pd;
-       (void)pdn;
-       return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
 /**
  * Install shared asynchronous device events handler.
  * This function is implemented to support event sharing
index 981fc2ee7c354b809a5384b81692cfe370ef59e6..fb10dd08390f85c41bc6f47d6ae3691f26afb471 100644 (file)
@@ -289,7 +289,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
                .max_wr = wqe_n >> rxq_data->sges_n,
                /* Max number of scatter/gather elements in a WR. */
                .max_sge = 1 << rxq_data->sges_n,
-               .pd = priv->sh->pd,
+               .pd = priv->sh->cdev->pd,
                .cq = rxq_obj->ibv_cq,
                .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
                .create_flags = (rxq_data->vlan_strip ?
@@ -627,7 +627,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                                        .rx_hash_fields_mask = hash_fields,
                                },
                                .rwq_ind_tbl = ind_tbl->ind_table,
-                               .pd = priv->sh->pd,
+                               .pd = priv->sh->cdev->pd,
                          },
                          &qp_init_attr);
 #else
@@ -648,7 +648,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                                        .rx_hash_fields_mask = hash_fields,
                                },
                                .rwq_ind_tbl = ind_tbl->ind_table,
-                               .pd = priv->sh->pd,
+                               .pd = priv->sh->cdev->pd,
                         });
 #endif
        if (!qp) {
@@ -741,7 +741,7 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
                                                    .wq_type = IBV_WQT_RQ,
                                                    .max_wr = 1,
                                                    .max_sge = 1,
-                                                   .pd = priv->sh->pd,
+                                                   .pd = priv->sh->cdev->pd,
                                                    .cq = rxq->ibv_cq,
                                              });
        if (!rxq->wq) {
@@ -807,7 +807,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
                                .rx_hash_fields_mask = 0,
                                },
                        .rwq_ind_tbl = ind_tbl,
-                       .pd = priv->sh->pd
+                       .pd = priv->sh->cdev->pd
                 });
        if (!hrxq->qp) {
                DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
@@ -895,7 +895,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
        qp_attr.qp_type = IBV_QPT_RAW_PACKET,
        /* Do *NOT* enable this, completions events are managed per Tx burst. */
        qp_attr.sq_sig_all = 0;
-       qp_attr.pd = priv->sh->pd;
+       qp_attr.pd = priv->sh->cdev->pd;
        qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
        if (txq_data->inlen_send)
                qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
@@ -1117,7 +1117,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
                                &(struct ibv_qp_init_attr_ex){
                                        .qp_type = IBV_QPT_RAW_PACKET,
                                        .comp_mask = IBV_QP_INIT_ATTR_PD,
-                                       .pd = sh->pd,
+                                       .pd = sh->cdev->pd,
                                        .send_cq = sh->self_lb.ibv_cq,
                                        .recv_cq = sh->self_lb.ibv_cq,
                                        .cap.max_recv_wr = 1,
index 3afcc096b9f0347f6d2d522bfce68ab9750b381c..8544d267674abaf5f4f6013fb5c64c81c83f35e1 100644 (file)
@@ -1139,7 +1139,7 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
        mlx5_mp_id_init(&mp_id, 0);
        if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
                DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
-                       mp->name, sh->pd, rte_strerror(rte_errno));
+                       mp->name, sh->cdev->pd, rte_strerror(rte_errno));
 }
 
 /**
@@ -1159,10 +1159,11 @@ mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
        int ret;
 
        mlx5_mp_id_init(&mp_id, 0);
-       ret = mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp, &mp_id);
+       ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
+                                      &mp_id);
        if (ret < 0 && rte_errno != EEXIST)
                DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
-                       mp->name, sh->pd, rte_strerror(rte_errno));
+                       mp->name, sh->cdev->pd, rte_strerror(rte_errno));
 }
 
 /**
@@ -1201,10 +1202,11 @@ mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
        switch (event) {
        case RTE_MEMPOOL_EVENT_READY:
                mlx5_mp_id_init(&mp_id, 0);
-               if (mlx5_mr_mempool_register(&sh->share_cache, sh->pd, mp,
+               if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
                                             &mp_id) < 0)
                        DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
-                               mp->name, sh->pd, rte_strerror(rte_errno));
+                               mp->name, sh->cdev->pd,
+                               rte_strerror(rte_errno));
                break;
        case RTE_MEMPOOL_EVENT_DESTROY:
                mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
@@ -1336,18 +1338,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
-       sh->pd = mlx5_os_alloc_pd(sh->cdev->ctx);
-       if (sh->pd == NULL) {
-               DRV_LOG(ERR, "PD allocation failure");
-               err = ENOMEM;
-               goto error;
-       }
        if (sh->devx) {
-               err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
-               if (err) {
-                       DRV_LOG(ERR, "Fail to extract pdn from PD");
-                       goto error;
-               }
                sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
@@ -1428,8 +1419,6 @@ error:
                mlx5_glue->devx_free_uar(sh->devx_rx_uar);
        if (sh->tx_uar)
                mlx5_glue->devx_free_uar(sh->tx_uar);
-       if (sh->pd)
-               claim_zero(mlx5_os_dealloc_pd(sh->pd));
        mlx5_free(sh);
        MLX5_ASSERT(err > 0);
        rte_errno = err;
@@ -1506,8 +1495,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_glue->devx_free_uar(sh->tx_uar);
                sh->tx_uar = NULL;
        }
-       if (sh->pd)
-               claim_zero(mlx5_os_dealloc_pd(sh->pd));
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
index b9e78eab8662d5354d02afbba4101a097df3671a..a9caa3a897a15546221ea5e881f0f52cb0edb24f 100644 (file)
@@ -1133,8 +1133,6 @@ struct mlx5_dev_ctx_shared {
        uint32_t max_port; /* Maximal IB device port index. */
        struct mlx5_bond_info bond; /* Bonding information. */
        struct mlx5_common_device *cdev; /* Backend mlx5 device. */
-       void *pd; /* Protection Domain. */
-       uint32_t pdn; /* Protection Domain number. */
        uint32_t tdn; /* Transport Domain number. */
        char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
        char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
@@ -1760,7 +1758,6 @@ void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
 struct rte_pci_driver;
 int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
-int mlx5_os_get_pdn(void *pd, uint32_t *pdn);
 int mlx5_os_net_probe(struct mlx5_common_device *cdev);
 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
 void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
index 1fb835cb0d2cbaf0b04a1e07f09ad4fabe2b511b..b98b82bf79071ae10b5279b8eb49d74e16117ca3 100644 (file)
@@ -276,7 +276,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
                                                MLX5_WQ_END_PAD_MODE_ALIGN :
                                                MLX5_WQ_END_PAD_MODE_NONE;
-       rq_attr.wq_attr.pd = priv->sh->pdn;
+       rq_attr.wq_attr.pd = priv->sh->cdev->pdn;
        rq_attr.counter_set_id = priv->counter_set_id;
        /* Create RQ using DevX API. */
        return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
@@ -994,7 +994,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
                .tis_lst_sz = 1,
                .tis_num = priv->sh->tis->id,
                .wq_attr = (struct mlx5_devx_wq_attr){
-                       .pd = priv->sh->pdn,
+                       .pd = priv->sh->cdev->pdn,
                        .uar_page =
                                 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
                },
index e6a476a5466323835619a723e14f812953965415..ffcc031bff362386bf2a57c3221ce840dcfcfb29 100644 (file)
@@ -7662,7 +7662,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        mkey_attr.addr = (uintptr_t)mem;
        mkey_attr.size = size;
        mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-       mkey_attr.pd = sh->pdn;
+       mkey_attr.pd = sh->cdev->pdn;
        mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
        mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
        mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
index 49eec7a6b656df6cfefb91d15c3b37fec99602f1..17e3f2a300143bf2f135866ffa7d03bf060b8634 100644 (file)
@@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
                DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
                return -1;
        }
-       ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+       ret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr);
        if (ret) {
                DRV_LOG(ERR, "Failed to create direct Mkey.");
                mlx5_free(mr->addr);
@@ -317,8 +317,9 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                                    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
                        return -1;
                if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
-                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                                 sh->sq_ts_format)) {
+                                      sh->tx_uar, cdev->pdn,
+                                      MLX5_ASO_QUEUE_LOG_DESC,
+                                      sh->sq_ts_format)) {
                        mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
                        return -1;
                }
@@ -326,8 +327,9 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                break;
        case ASO_OPC_MOD_POLICER:
                if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
-                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                                 sh->sq_ts_format))
+                                      sh->tx_uar, cdev->pdn,
+                                      MLX5_ASO_QUEUE_LOG_DESC,
+                                      sh->sq_ts_format))
                        return -1;
                mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
                break;
@@ -337,8 +339,9 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                                    &sh->ct_mng->aso_sq.mr, 0))
                        return -1;
                if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
-                               sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                               sh->sq_ts_format)) {
+                                      sh->tx_uar, cdev->pdn,
+                                      MLX5_ASO_QUEUE_LOG_DESC,
+                                      sh->sq_ts_format)) {
                        mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
                        return -1;
                }
index e5c25b7c41fb9a628c049ae52180ce83af43b91b..f2fde91294755ef7f79035f628062bfe861f690f 100644 (file)
@@ -6357,12 +6357,10 @@ flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
  *   NULL otherwise and rte_errno is set.
  */
 static struct mlx5_aso_mtr_pool *
-flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
-                            struct mlx5_aso_mtr **mtr_free)
+flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_aso_mtr_pools_mng *pools_mng =
-                               &priv->sh->mtrmng->pools_mng;
+       struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
        struct mlx5_aso_mtr_pool *pool = NULL;
        struct mlx5_devx_obj *dcs = NULL;
        uint32_t i;
@@ -6370,7 +6368,8 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
 
        log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
        dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
-                       priv->sh->pdn, log_obj_size);
+                                                     priv->sh->cdev->pdn,
+                                                     log_obj_size);
        if (!dcs) {
                rte_errno = ENODATA;
                return NULL;
@@ -6392,8 +6391,7 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
        pools_mng->n_valid++;
        for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
                pool->mtrs[i].offset = i;
-               LIST_INSERT_HEAD(&pools_mng->meters,
-                                               &pool->mtrs[i], next);
+               LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
        }
        pool->mtrs[0].offset = 0;
        *mtr_free = &pool->mtrs[0];
@@ -11866,7 +11864,7 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,
        uint32_t i;
 
        obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
-                                                   priv->sh->pdn);
+                                                   priv->sh->cdev->pdn);
        if (!obj) {
                rte_errno = ENODATA;
                DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
@@ -12294,7 +12292,8 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,
        uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
 
        obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
-                                               priv->sh->pdn, log_obj_size);
+                                                         priv->sh->cdev->pdn,
+                                                         log_obj_size);
        if (!obj) {
                rte_errno = ENODATA;
                DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
index 05e350665429167077c98c60698335121db921b0..b53f905cf318598618a84b0974f1a33d6cc0a8e3 100644 (file)
@@ -84,7 +84,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
        struct mlx5_priv *priv = txq_ctrl->priv;
 
-       return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+       return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
                                  &priv->sh->share_cache, mr_ctrl, addr,
                                  priv->sh->cdev->config.mr_ext_memseg_en);
 }
@@ -180,7 +180,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
                return;
        DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
                dev->data->port_id, mem_idx, mp->name);
-       mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+       mr = mlx5_create_mr_ext(sh->cdev->pd, addr, len, mp->socket_id,
                                sh->share_cache.reg_mr_cb);
        if (!mr) {
                DRV_LOG(WARNING,
@@ -196,8 +196,8 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
        mlx5_mr_insert_cache(&sh->share_cache, mr);
        rte_rwlock_write_unlock(&sh->share_cache.rwlock);
        /* Insert to the local cache table */
-       mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache, mr_ctrl,
-                          addr, priv->sh->cdev->config.mr_ext_memseg_en);
+       mlx5_mr_addr2mr_bh(sh->cdev->pd, &priv->mp_id, &sh->share_cache,
+                          mr_ctrl, addr, sh->cdev->config.mr_ext_memseg_en);
 }
 
 /**
@@ -256,8 +256,8 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
        }
        priv = dev->data->dev_private;
        sh = priv->sh;
-       mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
-                               sh->share_cache.reg_mr_cb);
+       mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,
+                               SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
        if (!mr) {
                DRV_LOG(WARNING,
                        "port %u unable to dma map", dev->data->port_id);
index 60673d014d02d0710e2e0ecdd3c15b426e1fc79a..8c731f27160ab664e85509d209fb8b41be6a9183 100644 (file)
@@ -1241,8 +1241,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       ret = mlx5_mr_mempool_register(&priv->sh->share_cache, priv->sh->pd,
-                                      mp, &priv->mp_id);
+       ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+                                      priv->sh->cdev->pd, mp, &priv->mp_id);
        if (ret < 0 && rte_errno != EEXIST) {
                ret = rte_errno;
                DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
index e93647aafd563a6cb4f3d354eaa74cf825544d4d..cf4fbd3c9f55cd4be916ac0d3f69412fef93b994 100644 (file)
@@ -149,7 +149,8 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
        for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
                mp = rxq_ctrl->rxq.rxseg[s].mp;
                ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
-                                              priv->sh->pd, mp, &priv->mp_id);
+                                              priv->sh->cdev->pd, mp,
+                                              &priv->mp_id);
                if (ret < 0 && rte_errno != EEXIST)
                        return ret;
                rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
index 6dd362c48ada79067b1b82f62ab849e0b338f830..fb7b36197c3318bb19b5b34649ae8bc317a1a6de 100644 (file)
@@ -232,7 +232,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                .tis_lst_sz = 1,
                .tis_num = sh->tis->id,
                .wq_attr = (struct mlx5_devx_wq_attr){
-                       .pd = sh->pdn,
+                       .pd = sh->cdev->pdn,
                        .uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
                },
                .ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
@@ -444,7 +444,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
        sq_attr.wq_attr.cd_slave = 1;
        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-       sq_attr.wq_attr.pd = sh->pdn;
+       sq_attr.wq_attr.pd = sh->cdev->pdn;
        sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
        ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
                                  log2above(wq->sq_size),
index c70d636dbcd10fa12e55b565772f7d5ddd811f4a..4f1d37fc896dde8b8268a061dc24a71eda4cbe3c 100644 (file)
@@ -1002,25 +1002,4 @@ mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
        *dereg_mr_cb = mlx5_os_dereg_mr;
 }
 
-/**
- * Extract pdn of PD object using DevX
- *
- * @param[in] pd
- *   Pointer to the DevX PD object.
- * @param[out] pdn
- *   Pointer to the PD object number variable.
- *
- * @return
- *   0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
-       if (!pd)
-               return -EINVAL;
-
-       *pdn = ((struct mlx5_pd *)pd)->pdn;
-       return 0;
-}
-
 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
index f915a9d047b4aa82ae6d4dbdcc38182d492bfad8..54d3e64f436f889df3a63a64c0af5f3df7acea8a 100644 (file)
@@ -187,12 +187,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
                rte_errno = ENOMEM;
                goto error;
        }
-       priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
-       if (!priv->pd) {
-               DRV_LOG(ERR, "can't allocate pd.");
-               rte_errno = ENOMEM;
-               goto error;
-       }
        priv->regexdev->dev_ops = &mlx5_regexdev_ops;
        priv->regexdev->enqueue = mlx5_regexdev_enqueue;
 #ifdef HAVE_MLX5_UMR_IMKEY
@@ -230,8 +224,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
        return 0;
 
 error:
-       if (priv->pd)
-               mlx5_glue->dealloc_pd(priv->pd);
        if (priv->uar)
                mlx5_glue->devx_free_uar(priv->uar);
        if (priv->regexdev)
@@ -264,8 +256,6 @@ mlx5_regex_dev_remove(struct mlx5_common_device *cdev)
                                                          NULL);
                if (priv->mr_scache.cache.table)
                        mlx5_mr_release_cache(&priv->mr_scache);
-               if (priv->pd)
-                       mlx5_glue->dealloc_pd(priv->pd);
                if (priv->uar)
                        mlx5_glue->devx_free_uar(priv->uar);
                if (priv->regexdev)
index 1d1906751392abac1f51becbafd052312e683c02..c128b7acbb7cc1ec352c02f164650e145ec45b1b 100644 (file)
@@ -68,7 +68,6 @@ struct mlx5_regex_priv {
                                MLX5_RXP_EM_COUNT];
        uint32_t nb_engines; /* Number of RegEx engines. */
        struct mlx5dv_devx_uar *uar; /* UAR object. */
-       struct ibv_pd *pd;
        TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
        /**< Called by memory event callback. */
        struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
@@ -79,26 +78,6 @@ struct mlx5_regex_priv {
        uint32_t mmo_regex_sq_cap:1;
 };
 
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-static inline int
-regex_get_pdn(void *pd, uint32_t *pdn)
-{
-       struct mlx5dv_obj obj;
-       struct mlx5dv_pd pd_info;
-       int ret = 0;
-
-       obj.pd.in = pd;
-       obj.pd.out = &pd_info;
-       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-       if (ret) {
-               DRV_LOG(DEBUG, "Fail to get PD object info");
-               return ret;
-       }
-       *pdn = pd_info.pdn;
-       return 0;
-}
-#endif
-
 /* mlx5_regex.c */
 int mlx5_regex_start(struct rte_regexdev *dev);
 int mlx5_regex_stop(struct rte_regexdev *dev);
index fa95ce72c98b96a9034a58ac7b7990ce57044ece..1136de1d7efbf67718e5b84a059ca7fe29546e0a 100644 (file)
@@ -138,21 +138,17 @@ regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
        struct mlx5_devx_qp_attr attr = {
                .cqn = qp->cq.cq_obj.cq->id,
                .uar_index = priv->uar->page_id,
+               .pd = priv->cdev->pdn,
                .ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
                .user_index = q_ind,
        };
        struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
-       uint32_t pd_num = 0;
        int ret;
 
        qp_obj->log_nb_desc = log_nb_desc;
        qp_obj->qpn = q_ind;
        qp_obj->ci = 0;
        qp_obj->pi = 0;
-       ret = regex_get_pdn(priv->pd, &pd_num);
-       if (ret)
-               return ret;
-       attr.pd = pd_num;
        attr.rq_size = 0;
        attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
                        log_nb_desc));
index f9d79d3549155c9558be612c893108132beac6d7..575b6397523877687f0140ea4dbe0d8850f2efcf 100644 (file)
@@ -138,8 +138,8 @@ mlx5_regex_addr2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
        if (likely(lkey != UINT32_MAX))
                return lkey;
        /* Take slower bottom-half on miss. */
-       return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
-                                 !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
+       return mlx5_mr_addr2mr_bh(priv->cdev->pd, 0, &priv->mr_scache, mr_ctrl,
+                                 addr, !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
 }
 
 
@@ -639,7 +639,7 @@ setup_qps(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue)
 static int
 setup_buffers(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp)
 {
-       struct ibv_pd *pd = priv->pd;
+       struct ibv_pd *pd = priv->cdev->pd;
        uint32_t i;
        int err;
 
@@ -746,12 +746,7 @@ mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
 
        if (priv->has_umr) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-               if (regex_get_pdn(priv->pd, &attr.pd)) {
-                       err = -rte_errno;
-                       DRV_LOG(ERR, "Failed to get pdn.");
-                       mlx5_regexdev_teardown_fastpath(priv, qp_id);
-                       return err;
-               }
+               attr.pd = priv->cdev->pdn;
 #endif
                for (i = 0; i < qp->nb_desc; i++) {
                        attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
index 2468202ceb8873dccbda18e2b569509399f64b9f..fe68ab02520a906417bd526a0cea33dbb8544793 100644 (file)
@@ -188,37 +188,6 @@ mlx5_vdpa_features_set(int vid)
        return 0;
 }
 
-static int
-mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
-       if (priv->pd == NULL) {
-               DRV_LOG(ERR, "Failed to allocate PD.");
-               return errno ? -errno : -ENOMEM;
-       }
-       struct mlx5dv_obj obj;
-       struct mlx5dv_pd pd_info;
-       int ret = 0;
-
-       obj.pd.in = priv->pd;
-       obj.pd.out = &pd_info;
-       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-       if (ret) {
-               DRV_LOG(ERR, "Fail to get PD object info.");
-               mlx5_glue->dealloc_pd(priv->pd);
-               priv->pd = NULL;
-               return -errno;
-       }
-       priv->pdn = pd_info.pdn;
-       return 0;
-#else
-       (void)priv;
-       DRV_LOG(ERR, "Cannot get pdn - no DV support.");
-       return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-}
-
 static int
 mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)
 {
@@ -289,10 +258,6 @@ mlx5_vdpa_dev_close(int vid)
        mlx5_vdpa_virtqs_release(priv);
        mlx5_vdpa_event_qp_global_release(priv);
        mlx5_vdpa_mem_dereg(priv);
-       if (priv->pd) {
-               claim_zero(mlx5_glue->dealloc_pd(priv->pd));
-               priv->pd = NULL;
-       }
        priv->configured = 0;
        priv->vid = 0;
        /* The mutex may stay locked after event thread cancel - initiate it. */
@@ -320,8 +285,7 @@ mlx5_vdpa_dev_config(int vid)
        if (mlx5_vdpa_mtu_set(priv))
                DRV_LOG(WARNING, "MTU cannot be set on device %s.",
                                vdev->device->name);
-       if (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||
-           mlx5_vdpa_err_event_setup(priv) ||
+       if (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_err_event_setup(priv) ||
            mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||
            mlx5_vdpa_cqe_event_setup(priv)) {
                mlx5_vdpa_dev_close(vid);
index 1fe57c72b8ab21df8286a9fdadf20c98bc2c7440..d9a68e701e3ef463b11536a7b5dd0cb13d4fabb9 100644 (file)
@@ -131,8 +131,6 @@ struct mlx5_vdpa_priv {
        struct mlx5_common_device *cdev; /* Backend mlx5 device. */
        int vid; /* vhost device id. */
        struct mlx5_hca_vdpa_attr caps;
-       uint32_t pdn; /* Protection Domain number. */
-       struct ibv_pd *pd;
        uint32_t gpa_mkey_index;
        struct ibv_mr *null_mr;
        struct rte_vhost_memory *vmem;
index 979a2abd41a35c9be5e9cea3c53ecc714d52d703..47f9afe855758519db14a052004ea0587dbc5cfc 100644 (file)
@@ -593,7 +593,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
                return -1;
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
-       attr.pd = priv->pdn;
+       attr.pd = priv->cdev->pdn;
        attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
index 0b0ffeb07db1c7cd2a96774eea10cce2631b5c4c..3e8d9eb9a2ec70970a74d3c89b67d56656eb90a7 100644 (file)
@@ -39,7 +39,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
        struct mlx5_devx_mkey_attr mkey_attr = {
                        .addr = (uintptr_t)log_base,
                        .size = log_size,
-                       .pd = priv->pdn,
+                       .pd = priv->cdev->pdn,
                        .pg_access = 1,
        };
        struct mlx5_devx_virtq_attr attr = {
index c5cdb3abd758c6aa94ba445a7978373900e7435a..f551a094cd2bb86ad72d8c0383c40779a2d926eb 100644 (file)
@@ -193,7 +193,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
        if (!mem)
                return -rte_errno;
        priv->vmem = mem;
-       priv->null_mr = mlx5_glue->alloc_null_mr(priv->pd);
+       priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
        if (!priv->null_mr) {
                DRV_LOG(ERR, "Failed to allocate null MR.");
                ret = -errno;
@@ -220,7 +220,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
                mkey_attr.addr = (uintptr_t)(reg->guest_phys_addr);
                mkey_attr.size = reg->size;
                mkey_attr.umem_id = entry->umem->umem_id;
-               mkey_attr.pd = priv->pdn;
+               mkey_attr.pd = priv->cdev->pdn;
                mkey_attr.pg_access = 1;
                entry->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx,
                                                        &mkey_attr);
@@ -268,7 +268,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
        }
        mkey_attr.addr = (uintptr_t)(mem->regions[0].guest_phys_addr);
        mkey_attr.size = mem_size;
-       mkey_attr.pd = priv->pdn;
+       mkey_attr.pd = priv->cdev->pdn;
        mkey_attr.umem_id = 0;
        /* Must be zero for KLM mode. */
        mkey_attr.log_entity_size = mode == MLX5_MKC_ACCESS_MODE_KLM_FBS ?
index 5ef31de8348e1222e92b0d35e578f8b5233c4755..cfd50d92f5a86129826fa90d6746adb6d833c27b 100644 (file)
@@ -322,7 +322,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
        attr.mkey = priv->gpa_mkey_index;
        attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
        attr.queue_index = index;
-       attr.pd = priv->pdn;
+       attr.pd = priv->cdev->pdn;
        attr.hw_latency_mode = priv->hw_latency_mode;
        attr.hw_max_latency_us = priv->hw_max_latency_us;
        attr.hw_max_pending_comp = priv->hw_max_pending_comp;