git.droids-corp.org - dpdk.git/commitdiff
common/mlx5: share HCA capabilities handle
authorMichael Baum <michaelba@nvidia.com>
Tue, 19 Oct 2021 20:55:55 +0000 (23:55 +0300)
committerThomas Monjalon <thomas@monjalon.net>
Thu, 21 Oct 2021 13:53:46 +0000 (15:53 +0200)
Add HCA attributes structure as a field of device config structure.
It is queried during common probing, and the timestamp format fields are updated from it.

Each driver uses the HCA attributes from the common device config structure,
instead of querying them for itself.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
18 files changed:
drivers/common/mlx5/mlx5_common.c
drivers/common/mlx5/mlx5_common.h
drivers/compress/mlx5/mlx5_compress.c
drivers/crypto/mlx5/mlx5_crypto.c
drivers/crypto/mlx5/mlx5_crypto.h
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_flow_aso.c
drivers/net/mlx5/mlx5_txpp.c
drivers/net/mlx5/windows/mlx5_os.c
drivers/regex/mlx5/mlx5_regex.c
drivers/regex/mlx5/mlx5_regex.h
drivers/regex/mlx5/mlx5_regex_control.c
drivers/vdpa/mlx5/mlx5_vdpa.c
drivers/vdpa/mlx5/mlx5_vdpa.h
drivers/vdpa/mlx5/mlx5_vdpa_event.c

index ec246c15f936d4d6d1d7e49c9ce456d017542f42..17a54acf1e0ff01d61c3f101ff49a55393e96ec5 100644 (file)
@@ -354,6 +354,16 @@ mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
        ret = mlx5_os_pd_create(cdev);
        if (ret)
                goto error;
+       /* All actions taken below are relevant only when DevX is supported */
+       if (cdev->config.devx == 0)
+               return 0;
+       /* Query HCA attributes. */
+       ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
+       if (ret) {
+               DRV_LOG(ERR, "Unable to read HCA capabilities.");
+               rte_errno = ENOTSUP;
+               goto error;
+       }
        return 0;
 error:
        mlx5_dev_hw_global_release(cdev);
index d72002ca3c2a0b08b022b6b4da569103f8a88efc..a863fb2b26fc9faaf93468b0cb9535e9b2c19fd6 100644 (file)
@@ -332,6 +332,7 @@ void mlx5_common_init(void);
  *  - User device parameters disabled features.
  */
 struct mlx5_common_dev_config {
+       struct mlx5_hca_attr hca_attr; /* HCA attributes. */
        int dbnc; /* Skip doorbell register write barrier. */
        unsigned int devx:1; /* Whether devx interface is available or not. */
        unsigned int sys_mem_en:1; /* The default memory allocator. */
index 4c8e67c4df89707368edafb11b82893a47078800..8fe65293a648bd7084b9edc61a1639dc647743e2 100644 (file)
@@ -39,7 +39,6 @@ struct mlx5_compress_priv {
        struct mlx5_common_device *cdev; /* Backend mlx5 device. */
        void *uar;
        uint8_t min_block_size;
-       uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
        /* Minimum huffman block size supported by the device. */
        struct rte_compressdev_config dev_config;
        LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
@@ -243,7 +242,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                goto err;
        }
        qp_attr.cqn = qp->cq.cq->id;
-       qp_attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+       qp_attr.ts_format =
+               mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        qp_attr.rq_size = 0;
        qp_attr.sq_size = RTE_BIT32(log_ops_n);
        qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
@@ -755,7 +755,7 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
 {
        struct rte_compressdev *compressdev;
        struct mlx5_compress_priv *priv;
-       struct mlx5_hca_attr att = { 0 };
+       struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
        struct rte_compressdev_pmd_init_params init_params = {
                .name = "",
                .socket_id = cdev->dev->numa_node,
@@ -767,10 +767,9 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
-       if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &att) != 0 ||
-           ((att.mmo_compress_sq_en == 0 || att.mmo_decompress_sq_en == 0 ||
-               att.mmo_dma_sq_en == 0) && (att.mmo_compress_qp_en == 0 ||
-               att.mmo_decompress_qp_en == 0 || att.mmo_dma_qp_en == 0))) {
+       if ((attr->mmo_compress_sq_en == 0 || attr->mmo_decompress_sq_en == 0 ||
+           attr->mmo_dma_sq_en == 0) && (attr->mmo_compress_qp_en == 0 ||
+           attr->mmo_decompress_qp_en == 0 || attr->mmo_dma_qp_en == 0)) {
                DRV_LOG(ERR, "Not enough capabilities to support compress "
                        "operations, maybe old FW/OFED version?");
                rte_errno = ENOTSUP;
@@ -789,16 +788,15 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
        compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
        compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
        priv = compressdev->data->dev_private;
-       priv->mmo_decomp_sq = att.mmo_decompress_sq_en;
-       priv->mmo_decomp_qp = att.mmo_decompress_qp_en;
-       priv->mmo_comp_sq = att.mmo_compress_sq_en;
-       priv->mmo_comp_qp = att.mmo_compress_qp_en;
-       priv->mmo_dma_sq = att.mmo_dma_sq_en;
-       priv->mmo_dma_qp = att.mmo_dma_qp_en;
+       priv->mmo_decomp_sq = attr->mmo_decompress_sq_en;
+       priv->mmo_decomp_qp = attr->mmo_decompress_qp_en;
+       priv->mmo_comp_sq = attr->mmo_compress_sq_en;
+       priv->mmo_comp_qp = attr->mmo_compress_qp_en;
+       priv->mmo_dma_sq = attr->mmo_dma_sq_en;
+       priv->mmo_dma_qp = attr->mmo_dma_qp_en;
        priv->cdev = cdev;
        priv->compressdev = compressdev;
-       priv->min_block_size = att.compress_min_block_size;
-       priv->qp_ts_format = att.qp_ts_format;
+       priv->min_block_size = attr->compress_min_block_size;
        if (mlx5_compress_uar_prepare(priv) != 0) {
                rte_compressdev_pmd_destroy(priv->compressdev);
                return -1;
index b22b7836e1f129dc8ac0819fd6b075e09fa4d88f..ff4c67c0a09e3fccb10d954f9827f479766a8a29 100644 (file)
@@ -669,7 +669,8 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        attr.cqn = qp->cq_obj.cq->id;
        attr.rq_size = 0;
        attr.sq_size = RTE_BIT32(log_nb_desc);
-       attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+       attr.ts_format =
+               mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj, log_nb_desc,
                                  &attr, socket_id);
        if (ret) {
@@ -920,7 +921,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
        struct mlx5_devx_obj *login;
        struct mlx5_crypto_priv *priv;
        struct mlx5_crypto_devarg_params devarg_prms = { 0 };
-       struct mlx5_hca_attr attr = { 0 };
        struct rte_cryptodev_pmd_init_params init_params = {
                .name = "",
                .private_data_size = sizeof(struct mlx5_crypto_priv),
@@ -937,8 +937,7 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
-       if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr) != 0 ||
-           attr.crypto == 0 || attr.aes_xts == 0) {
+       if (!cdev->config.hca_attr.crypto || !cdev->config.hca_attr.aes_xts) {
                DRV_LOG(ERR, "Not enough capabilities to support crypto "
                        "operations, maybe old FW/OFED version?");
                rte_errno = ENOTSUP;
@@ -972,7 +971,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
        priv->cdev = cdev;
        priv->login_obj = login;
        priv->crypto_dev = crypto_dev;
-       priv->qp_ts_format = attr.qp_ts_format;
        if (mlx5_crypto_uar_prepare(priv) != 0) {
                rte_cryptodev_pmd_destroy(priv->crypto_dev);
                return -1;
index 27ae9cff2cec7edd9db4d625a3a899176c903eb6..030f36942308e25bb8e2a15999bee6241b550510 100644 (file)
@@ -24,7 +24,6 @@ struct mlx5_crypto_priv {
        void *uar; /* User Access Region. */
        volatile uint64_t *uar_addr;
        uint32_t max_segs_num; /* Maximum supported data segs. */
-       uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
        struct mlx5_hlist *dek_hlist; /* Dek hash list. */
        struct rte_cryptodev_config dev_config;
        struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
index 0623e7ac3d7193e9083e716eb21177c2d3a740ea..6ee0257d338b5b760fa0acd0bd52b17ab928964b 100644 (file)
@@ -132,8 +132,8 @@ mlx5_os_set_nonblock_channel_fd(int fd)
  * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
  * device attributes from the glue out parameter.
  *
- * @param dev
- *   Pointer to ibv context.
+ * @param cdev
+ *   Pointer to mlx5 device.
  *
  * @param device_attr
  *   Pointer to mlx5 device attributes.
@@ -142,15 +142,17 @@ mlx5_os_set_nonblock_channel_fd(int fd)
  *   0 on success, non zero error number otherwise
  */
 int
-mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+                    struct mlx5_dev_attr *device_attr)
 {
        int err;
+       struct ibv_context *ctx = cdev->ctx;
        struct ibv_device_attr_ex attr_ex;
+
        memset(device_attr, 0, sizeof(*device_attr));
        err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
        if (err)
                return err;
-
        device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
        device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
        device_attr->max_sge = attr_ex.orig_attr.max_sge;
@@ -1333,27 +1335,9 @@ err_secondary:
                config->mps == MLX5_MPW ? "legacy " : "",
                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
        if (sh->devx) {
-               err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
-                                                  &config->hca_attr);
-               if (err) {
-                       err = -err;
-                       goto error;
-               }
-               /* Check relax ordering support. */
-               if (!haswell_broadwell_cpu) {
-                       sh->cmng.relaxed_ordering_write =
-                               config->hca_attr.relaxed_ordering_write;
-                       sh->cmng.relaxed_ordering_read =
-                               config->hca_attr.relaxed_ordering_read;
-               } else {
-                       sh->cmng.relaxed_ordering_read = 0;
-                       sh->cmng.relaxed_ordering_write = 0;
-               }
-               sh->rq_ts_format = config->hca_attr.rq_ts_format;
-               sh->sq_ts_format = config->hca_attr.sq_ts_format;
+               config->hca_attr = sh->cdev->config.hca_attr;
                sh->steering_format_version =
                        config->hca_attr.steering_format_version;
-               sh->qp_ts_format = config->hca_attr.qp_ts_format;
                /* Check for LRO support. */
                if (config->dest_tir && config->hca_attr.lro_cap &&
                    config->dv_flow_en) {
index 8544d267674abaf5f4f6013fb5c64c81c83f35e1..7fc2ca734571960776b13b3f82d8d14dfe709e70 100644 (file)
@@ -520,6 +520,7 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
 static void
 mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
+       struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
        int i;
 
        memset(&sh->cmng, 0, sizeof(sh->cmng));
@@ -532,6 +533,10 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
                TAILQ_INIT(&sh->cmng.counters[i]);
                rte_spinlock_init(&sh->cmng.csl[i]);
        }
+       if (sh->devx && !haswell_broadwell_cpu) {
+               sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
+               sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
+       }
 }
 
 /**
@@ -1317,7 +1322,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        sh->devx = sh->cdev->config.devx;
        if (spawn->bond_info)
                sh->bond = *spawn->bond_info;
-       err = mlx5_os_get_dev_attr(sh->cdev->ctx, &sh->device_attr);
+       err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
                goto error;
index a9caa3a897a15546221ea5e881f0f52cb0edb24f..bc00b21b414b935a2e2e8160def804f433a9a6a7 100644 (file)
@@ -1120,11 +1120,8 @@ struct mlx5_dev_ctx_shared {
        uint32_t refcnt;
        uint32_t devx:1; /* Opened with DV. */
        uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
-       uint32_t rq_ts_format:2; /* RQ timestamp formats supported. */
-       uint32_t sq_ts_format:2; /* SQ timestamp formats supported. */
        uint32_t steering_format_version:4;
        /* Indicates the device steering logic format. */
-       uint32_t qp_ts_format:2; /* QP timestamp formats supported. */
        uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
        uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
        uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
@@ -1756,7 +1753,8 @@ void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
 
 /* mlx5_os.c */
 struct rte_pci_driver;
-int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
+int mlx5_os_get_dev_attr(struct mlx5_common_device *dev,
+                        struct mlx5_dev_attr *dev_attr);
 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
 int mlx5_os_net_probe(struct mlx5_common_device *cdev);
 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
index b98b82bf79071ae10b5279b8eb49d74e16117ca3..6b6b9c77ae49fe78bd49e98c971948448da10944 100644 (file)
@@ -236,6 +236,7 @@ static int
 mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_common_device *cdev = priv->sh->cdev;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -249,7 +250,8 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
        rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
        rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
-       rq_attr.ts_format = mlx5_ts_format_conv(priv->sh->rq_ts_format);
+       rq_attr.ts_format =
+                       mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
        /* Fill WQ attributes for this RQ. */
        if (mlx5_rxq_mprq_enabled(rxq_data)) {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
@@ -276,12 +278,11 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
                                                MLX5_WQ_END_PAD_MODE_ALIGN :
                                                MLX5_WQ_END_PAD_MODE_NONE;
-       rq_attr.wq_attr.pd = priv->sh->cdev->pdn;
+       rq_attr.wq_attr.pd = cdev->pdn;
        rq_attr.counter_set_id = priv->counter_set_id;
        /* Create RQ using DevX API. */
-       return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
-                                  wqe_size, log_desc_n, &rq_attr,
-                                  rxq_ctrl->socket);
+       return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
+                                  log_desc_n, &rq_attr, rxq_ctrl->socket);
 }
 
 /**
@@ -981,6 +982,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
                                  uint16_t log_desc_n)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_common_device *cdev = priv->sh->cdev;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -994,14 +996,15 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
                .tis_lst_sz = 1,
                .tis_num = priv->sh->tis->id,
                .wq_attr = (struct mlx5_devx_wq_attr){
-                       .pd = priv->sh->cdev->pdn,
+                       .pd = cdev->pdn,
                        .uar_page =
                                 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
                },
-               .ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
+               .ts_format =
+                       mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
        };
        /* Create Send Queue object with DevX. */
-       return mlx5_devx_sq_create(priv->sh->cdev->ctx, &txq_obj->sq_obj,
+       return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
                                   log_desc_n, &sq_attr, priv->sh->numa_node);
 }
 #endif
index 17e3f2a300143bf2f135866ffa7d03bf060b8634..8f3d2ffc2c692fdc984cf93afaf389ad21001165 100644 (file)
@@ -319,7 +319,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
                                       sh->tx_uar, cdev->pdn,
                                       MLX5_ASO_QUEUE_LOG_DESC,
-                                      sh->sq_ts_format)) {
+                                      cdev->config.hca_attr.sq_ts_format)) {
                        mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
                        return -1;
                }
@@ -329,7 +329,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
                                       sh->tx_uar, cdev->pdn,
                                       MLX5_ASO_QUEUE_LOG_DESC,
-                                      sh->sq_ts_format))
+                                      cdev->config.hca_attr.sq_ts_format))
                        return -1;
                mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
                break;
@@ -341,7 +341,7 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
                                       sh->tx_uar, cdev->pdn,
                                       MLX5_ASO_QUEUE_LOG_DESC,
-                                      sh->sq_ts_format)) {
+                                      cdev->config.hca_attr.sq_ts_format)) {
                        mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
                        return -1;
                }
index fb7b36197c3318bb19b5b34649ae8bc317a1a6de..9960cc44e7e2cd48c95f8760ab9e6d9958c276e7 100644 (file)
@@ -235,7 +235,8 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                        .pd = sh->cdev->pdn,
                        .uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
                },
-               .ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
+               .ts_format = mlx5_ts_format_conv
+                                      (sh->cdev->config.hca_attr.sq_ts_format),
        };
        struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
        struct mlx5_devx_cq_attr cq_attr = {
@@ -445,7 +446,8 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        sq_attr.wq_attr.cd_slave = 1;
        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
        sq_attr.wq_attr.pd = sh->cdev->pdn;
-       sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
+       sq_attr.ts_format =
+               mlx5_ts_format_conv(sh->cdev->config.hca_attr.sq_ts_format);
        ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
                                  log2above(wq->sq_size),
                                  &sq_attr, sh->numa_node);
index 4f1d37fc896dde8b8268a061dc24a71eda4cbe3c..3660aae32236b63a5ee0d5de1376db4eca700acb 100644 (file)
@@ -143,50 +143,45 @@ mlx5_init_once(void)
 /**
  * Get mlx5 device attributes.
  *
- * @param ctx
- *   Pointer to device context.
+ * @param cdev
+ *   Pointer to mlx5 device.
  *
  * @param device_attr
  *   Pointer to mlx5 device attributes.
  *
  * @return
- *   0 on success, non zero error number otherwise
+ *   0 on success, non zero error number otherwise.
  */
 int
-mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+                    struct mlx5_dev_attr *device_attr)
 {
        struct mlx5_context *mlx5_ctx;
-       struct mlx5_hca_attr hca_attr;
        void *pv_iseg = NULL;
        u32 cb_iseg = 0;
        int err = 0;
 
-       if (!ctx)
+       if (!cdev || !cdev->ctx)
                return -EINVAL;
-       mlx5_ctx = (struct mlx5_context *)ctx;
+       mlx5_ctx = (struct mlx5_context *)cdev->ctx;
        memset(device_attr, 0, sizeof(*device_attr));
-       err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
-       if (err) {
-               DRV_LOG(ERR, "Failed to get device hca_cap");
-               return err;
-       }
-       device_attr->max_cq = 1 << hca_attr.log_max_cq;
-       device_attr->max_qp = 1 << hca_attr.log_max_qp;
-       device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
-       device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
-       device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
-       device_attr->max_pd = 1 << hca_attr.log_max_pd;
-       device_attr->max_srq = 1 << hca_attr.log_max_srq;
-       device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
-       device_attr->max_tso = 1 << hca_attr.max_lso_cap;
-       if (hca_attr.rss_ind_tbl_cap) {
+       device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
+       device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
+       device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
+       device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
+       device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
+       device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
+       device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
+       device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
+       device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap;
+       if (cdev->config.hca_attr.rss_ind_tbl_cap) {
                device_attr->max_rwq_indirection_table_size =
-                       1 << hca_attr.rss_ind_tbl_cap;
+                       1 << cdev->config.hca_attr.rss_ind_tbl_cap;
        }
        device_attr->sw_parsing_offloads =
-               mlx5_get_supported_sw_parsing_offloads(&hca_attr);
+               mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr);
        device_attr->tunnel_offloads_caps =
-               mlx5_get_supported_tunneling_offloads(&hca_attr);
+               mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr);
        pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
        if (pv_iseg == NULL) {
                DRV_LOG(ERR, "Failed to get device hca_iseg");
@@ -364,7 +359,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        DRV_LOG(DEBUG, "MPW isn't supported");
-       mlx5_os_get_dev_attr(sh->cdev->ctx, &device_attr);
+       mlx5_os_get_dev_attr(sh->cdev, &device_attr);
        config->swp = device_attr.sw_parsing_offloads &
                (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
                 MLX5_SW_PARSING_TSO_CAP);
@@ -472,21 +467,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                config->cqe_comp = 0;
        }
        if (sh->devx) {
-               err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
-                                                  &config->hca_attr);
-               if (err) {
-                       err = -err;
-                       goto error;
-               }
-               /* Check relax ordering support. */
-               sh->cmng.relaxed_ordering_read = 0;
-               sh->cmng.relaxed_ordering_write = 0;
-               if (!haswell_broadwell_cpu) {
-                       sh->cmng.relaxed_ordering_write =
-                               config->hca_attr.relaxed_ordering_write;
-                       sh->cmng.relaxed_ordering_read =
-                               config->hca_attr.relaxed_ordering_read;
-               }
+               config->hca_attr = sh->cdev->config.hca_attr;
                config->hw_csum = config->hca_attr.csum_cap;
                DRV_LOG(DEBUG, "checksum offloading is %ssupported",
                    (config->hw_csum ? "" : "not "));
@@ -516,9 +497,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                                                 (NS_PER_S / MS_PER_S))
                                config->rt_timestamp = 1;
                }
-               sh->rq_ts_format = config->hca_attr.rq_ts_format;
-               sh->sq_ts_format = config->hca_attr.sq_ts_format;
-               sh->qp_ts_format = config->hca_attr.qp_ts_format;
        }
        if (config->mprq.enabled) {
                DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
index 54d3e64f436f889df3a63a64c0af5f3df7acea8a..91fb9310621e8c50f24c32148d3a7fa7dc1922ac 100644 (file)
@@ -125,18 +125,13 @@ static int
 mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
 {
        struct mlx5_regex_priv *priv = NULL;
-       struct mlx5_hca_attr attr;
+       struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
        char name[RTE_REGEXDEV_NAME_MAX_LEN];
        int ret;
        uint32_t val;
 
-       ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
-       if (ret) {
-               DRV_LOG(ERR, "Unable to read HCA capabilities.");
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       } else if (((!attr.regex) && (!attr.mmo_regex_sq_en) &&
-               (!attr.mmo_regex_qp_en)) || attr.regexp_num_of_engines == 0) {
+       if ((!attr->regex && !attr->mmo_regex_sq_en && !attr->mmo_regex_qp_en)
+           || attr->regexp_num_of_engines == 0) {
                DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
                        "old FW/OFED version?");
                rte_errno = ENOTSUP;
@@ -154,9 +149,8 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       priv->mmo_regex_qp_cap = attr.mmo_regex_qp_en;
-       priv->mmo_regex_sq_cap = attr.mmo_regex_sq_en;
-       priv->qp_ts_format = attr.qp_ts_format;
+       priv->mmo_regex_qp_cap = attr->mmo_regex_qp_en;
+       priv->mmo_regex_sq_cap = attr->mmo_regex_sq_en;
        priv->cdev = cdev;
        priv->nb_engines = 2; /* attr.regexp_num_of_engines */
        ret = mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
@@ -190,8 +184,8 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
        priv->regexdev->dev_ops = &mlx5_regexdev_ops;
        priv->regexdev->enqueue = mlx5_regexdev_enqueue;
 #ifdef HAVE_MLX5_UMR_IMKEY
-       if (!attr.umr_indirect_mkey_disabled &&
-           !attr.umr_modify_entity_size_disabled)
+       if (!attr->umr_indirect_mkey_disabled &&
+           !attr->umr_modify_entity_size_disabled)
                priv->has_umr = 1;
        if (priv->has_umr)
                priv->regexdev->enqueue = mlx5_regexdev_enqueue_gga;
index c128b7acbb7cc1ec352c02f164650e145ec45b1b..be81931b3a5e01be528af5f78717c281b9a378e3 100644 (file)
@@ -72,7 +72,6 @@ struct mlx5_regex_priv {
        /**< Called by memory event callback. */
        struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
        uint8_t is_bf2; /* The device is BF2 device. */
-       uint8_t qp_ts_format; /* Whether SQ supports timestamp formats. */
        uint8_t has_umr; /* The device supports UMR. */
        uint32_t mmo_regex_qp_cap:1;
        uint32_t mmo_regex_sq_cap:1;
index 1136de1d7efbf67718e5b84a059ca7fe29546e0a..545bbbcf89adbbbff521145cc1e225e7b2761174 100644 (file)
@@ -139,7 +139,8 @@ regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
                .cqn = qp->cq.cq_obj.cq->id,
                .uar_index = priv->uar->page_id,
                .pd = priv->cdev->pdn,
-               .ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
+               .ts_format = mlx5_ts_format_conv
+                                    (priv->cdev->config.hca_attr.qp_ts_format),
                .user_index = q_ind,
        };
        struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
index fe68ab02520a906417bd526a0cea33dbb8544793..3971f2e335dcf8bb5a37f4701fef7323e37d33b8 100644 (file)
@@ -505,36 +505,29 @@ static int
 mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev)
 {
        struct mlx5_vdpa_priv *priv = NULL;
-       struct mlx5_hca_attr attr;
-       int ret;
+       struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
 
-       ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
-       if (ret) {
-               DRV_LOG(ERR, "Unable to read HCA capabilities.");
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+       if (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {
                DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
                        "old FW/OFED version?");
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
-       if (!attr.vdpa.queue_counters_valid)
+       if (!attr->vdpa.queue_counters_valid)
                DRV_LOG(DEBUG, "No capability to support virtq statistics.");
        priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
                           sizeof(struct mlx5_vdpa_virtq) *
-                          attr.vdpa.max_num_virtio_queues * 2,
+                          attr->vdpa.max_num_virtio_queues * 2,
                           RTE_CACHE_LINE_SIZE);
        if (!priv) {
                DRV_LOG(ERR, "Failed to allocate private memory.");
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       priv->caps = attr.vdpa;
-       priv->log_max_rqt_size = attr.log_max_rqt_size;
-       priv->num_lag_ports = attr.num_lag_ports;
-       priv->qp_ts_format = attr.qp_ts_format;
-       if (attr.num_lag_ports == 0)
+       priv->caps = attr->vdpa;
+       priv->log_max_rqt_size = attr->log_max_rqt_size;
+       priv->num_lag_ports = attr->num_lag_ports;
+       if (attr->num_lag_ports == 0)
                priv->num_lag_ports = 1;
        priv->cdev = cdev;
        priv->var = mlx5_glue->dv_alloc_var(priv->cdev->ctx, 0);
index d9a68e701e3ef463b11536a7b5dd0cb13d4fabb9..5045fea773e565832ca43eea80a76574b38407c7 100644 (file)
@@ -142,7 +142,6 @@ struct mlx5_vdpa_priv {
        struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
        uint16_t nr_virtqs;
        uint8_t num_lag_ports;
-       uint8_t qp_ts_format;
        uint64_t features; /* Negotiated features. */
        uint16_t log_max_rqt_size;
        struct mlx5_vdpa_steer steer;
index 47f9afe855758519db14a052004ea0587dbc5cfc..19497597e69a96a335e8d6ffb5652f01c30e7972 100644 (file)
@@ -594,7 +594,8 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
        if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
                return -1;
        attr.pd = priv->cdev->pdn;
-       attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+       attr.ts_format =
+               mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
        if (!eqp->fw_qp) {
                DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
@@ -605,7 +606,8 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
        attr.rq_size = RTE_BIT32(log_desc_n);
        attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
        attr.sq_size = 0; /* No need SQ. */
-       attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+       attr.ts_format =
+               mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
        ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
                                  &attr, SOCKET_ID_ANY);
        if (ret) {