git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: rearrange device attribute structure
author: Michael Baum <michaelba@nvidia.com>
Mon, 14 Feb 2022 09:35:05 +0000 (11:35 +0200)
committer: Raslan Darawsheh <rasland@nvidia.com>
Mon, 21 Feb 2022 10:36:50 +0000 (11:36 +0100)
Rearrange the mlx5_os_get_dev_attr() function so that it first executes
all the queries and only then updates the capability fields.
In addition, rename the function to mlx5_os_capabilities_prepare() in
preparation for expanding its role to configure the device capabilities
inside it.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/linux/mlx5_verbs.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_trigger.c
drivers/net/mlx5/mlx5_txq.c
drivers/net/mlx5/windows/mlx5_os.c

index 69ba2aaf88686f5cb6a462b571e28b1a88dcc19a..f0aa0f4164c0c91613a161ae0bafa68d9fa25e41 100644 (file)
@@ -131,46 +131,25 @@ mlx5_os_set_nonblock_channel_fd(int fd)
  * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
  * device attributes from the glue out parameter.
  *
- * @param cdev
- *   Pointer to mlx5 device.
- *
- * @param device_attr
- *   Pointer to mlx5 device attributes.
+ * @param sh
+ *   Pointer to shared device context.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
-                    struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 {
        int err;
-       struct ibv_context *ctx = cdev->ctx;
-       struct ibv_device_attr_ex attr_ex;
+       struct ibv_context *ctx = sh->cdev->ctx;
+       struct ibv_device_attr_ex attr_ex = { .comp_mask = 0 };
+       struct mlx5dv_context dv_attr = { .comp_mask = 0 };
 
-       memset(device_attr, 0, sizeof(*device_attr));
        err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
        if (err) {
                rte_errno = errno;
                return -rte_errno;
        }
-       device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
-       device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
-       device_attr->max_sge = attr_ex.orig_attr.max_sge;
-       device_attr->max_cq = attr_ex.orig_attr.max_cq;
-       device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
-       device_attr->max_mr = attr_ex.orig_attr.max_mr;
-       device_attr->max_pd = attr_ex.orig_attr.max_pd;
-       device_attr->max_qp = attr_ex.orig_attr.max_qp;
-       device_attr->max_srq = attr_ex.orig_attr.max_srq;
-       device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
-       device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
-       device_attr->max_rwq_indirection_table_size =
-               attr_ex.rss_caps.max_rwq_indirection_table_size;
-       device_attr->max_tso = attr_ex.tso_caps.max_tso;
-       device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
-
-       struct mlx5dv_context dv_attr = { .comp_mask = 0 };
 #ifdef HAVE_IBV_MLX5_MOD_SWP
        dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
 #endif
@@ -185,31 +164,40 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
                rte_errno = errno;
                return -rte_errno;
        }
-
-       device_attr->flags = dv_attr.flags;
-       device_attr->comp_mask = dv_attr.comp_mask;
+       memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+       sh->dev_cap.device_cap_flags_ex = attr_ex.device_cap_flags_ex;
+       sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+       sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
+       sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
+       sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
+       sh->dev_cap.raw_packet_caps = attr_ex.raw_packet_caps;
+       sh->dev_cap.max_rwq_indirection_table_size =
+               attr_ex.rss_caps.max_rwq_indirection_table_size;
+       sh->dev_cap.max_tso = attr_ex.tso_caps.max_tso;
+       sh->dev_cap.tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
+       strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
+               sizeof(sh->dev_cap.fw_ver));
+       sh->dev_cap.flags = dv_attr.flags;
+       sh->dev_cap.comp_mask = dv_attr.comp_mask;
 #ifdef HAVE_IBV_MLX5_MOD_SWP
-       device_attr->sw_parsing_offloads =
+       sh->dev_cap.sw_parsing_offloads =
                dv_attr.sw_parsing_caps.sw_parsing_offloads;
 #endif
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-       device_attr->min_single_stride_log_num_of_bytes =
+       sh->dev_cap.min_single_stride_log_num_of_bytes =
                dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
-       device_attr->max_single_stride_log_num_of_bytes =
+       sh->dev_cap.max_single_stride_log_num_of_bytes =
                dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
-       device_attr->min_single_wqe_log_num_of_strides =
+       sh->dev_cap.min_single_wqe_log_num_of_strides =
                dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
-       device_attr->max_single_wqe_log_num_of_strides =
+       sh->dev_cap.max_single_wqe_log_num_of_strides =
                dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
-       device_attr->stride_supported_qpts =
+       sh->dev_cap.stride_supported_qpts =
                dv_attr.striding_rq_caps.supported_qpts;
 #endif
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
+       sh->dev_cap.tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
 #endif
-       strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
-               sizeof(device_attr->fw_ver));
-
        return 0;
 }
 
@@ -983,8 +971,8 @@ err_secondary:
         * Multi-packet send is supported by ConnectX-4 Lx PF as well
         * as all ConnectX-5 devices.
         */
-       if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
-               if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+       if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+               if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
                        DRV_LOG(DEBUG, "enhanced MPW is supported");
                        mps = MLX5_MPW_ENHANCED;
                } else {
@@ -996,41 +984,41 @@ err_secondary:
                mps = MLX5_MPW_DISABLED;
        }
 #ifdef HAVE_IBV_MLX5_MOD_SWP
-       if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
-               swp = sh->device_attr.sw_parsing_offloads;
+       if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+               swp = sh->dev_cap.sw_parsing_offloads;
        DRV_LOG(DEBUG, "SWP support: %u", swp);
 #endif
        config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
                MLX5_SW_PARSING_TSO_CAP);
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-       if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+       if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
                DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
-                       sh->device_attr.min_single_stride_log_num_of_bytes);
+                       sh->dev_cap.min_single_stride_log_num_of_bytes);
                DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
-                       sh->device_attr.max_single_stride_log_num_of_bytes);
+                       sh->dev_cap.max_single_stride_log_num_of_bytes);
                DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
-                       sh->device_attr.min_single_wqe_log_num_of_strides);
+                       sh->dev_cap.min_single_wqe_log_num_of_strides);
                DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
-                       sh->device_attr.max_single_wqe_log_num_of_strides);
+                       sh->dev_cap.max_single_wqe_log_num_of_strides);
                DRV_LOG(DEBUG, "\tsupported_qpts: %d",
-                       sh->device_attr.stride_supported_qpts);
+                       sh->dev_cap.stride_supported_qpts);
                DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
                        config->mprq.log_min_stride_wqe_size);
                DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
                mprq = 1;
                config->mprq.log_min_stride_size =
-                       sh->device_attr.min_single_stride_log_num_of_bytes;
+                       sh->dev_cap.min_single_stride_log_num_of_bytes;
                config->mprq.log_max_stride_size =
-                       sh->device_attr.max_single_stride_log_num_of_bytes;
+                       sh->dev_cap.max_single_stride_log_num_of_bytes;
                config->mprq.log_min_stride_num =
-                       sh->device_attr.min_single_wqe_log_num_of_strides;
+                       sh->dev_cap.min_single_wqe_log_num_of_strides;
                config->mprq.log_max_stride_num =
-                       sh->device_attr.max_single_wqe_log_num_of_strides;
+                       sh->dev_cap.max_single_wqe_log_num_of_strides;
        }
 #endif
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
-               config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+       if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+               config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
                             (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
                              MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
                              MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
@@ -1052,9 +1040,9 @@ err_secondary:
                "tunnel offloading disabled due to old OFED/rdma-core version");
 #endif
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
-       mpls_en = ((sh->device_attr.tunnel_offloads_caps &
+       mpls_en = ((sh->dev_cap.tunnel_offloads_caps &
                    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
-                  (sh->device_attr.tunnel_offloads_caps &
+                  (sh->dev_cap.tunnel_offloads_caps &
                    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
        DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
                mpls_en ? "" : "not ");
@@ -1215,7 +1203,7 @@ err_secondary:
                DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
                        priv->dev_port, priv->domain_id);
        }
-       config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
+       config->hw_csum = !!(sh->dev_cap.device_cap_flags_ex &
                            IBV_DEVICE_RAW_IP_CSUM);
        DRV_LOG(DEBUG, "checksum offloading is %ssupported",
                (config->hw_csum ? "" : "not "));
@@ -1224,7 +1212,7 @@ err_secondary:
        DRV_LOG(DEBUG, "counters are not supported");
 #endif
        config->ind_table_max_size =
-               sh->device_attr.max_rwq_indirection_table_size;
+               sh->dev_cap.max_rwq_indirection_table_size;
        /*
         * Remove this check once DPDK supports larger/variable
         * indirection tables.
@@ -1233,16 +1221,16 @@ err_secondary:
                config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
        DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
                config->ind_table_max_size);
-       config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
+       config->hw_vlan_strip = !!(sh->dev_cap.raw_packet_caps &
                                  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
        DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
                (config->hw_vlan_strip ? "" : "not "));
-       config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
+       config->hw_fcs_strip = !!(sh->dev_cap.raw_packet_caps &
                                 IBV_RAW_PACKET_CAP_SCATTER_FCS);
 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
-       hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
+       hw_padding = !!sh->dev_cap.rx_pad_end_addr_align;
 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
-       hw_padding = !!(sh->device_attr.device_cap_flags_ex &
+       hw_padding = !!(sh->dev_cap.device_cap_flags_ex &
                        IBV_DEVICE_PCI_WRITE_END_PADDING);
 #endif
        if (config->hw_padding && !hw_padding) {
@@ -1251,11 +1239,11 @@ err_secondary:
        } else if (config->hw_padding) {
                DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
        }
-       config->tso = (sh->device_attr.max_tso > 0 &&
-                     (sh->device_attr.tso_supported_qpts &
+       config->tso = (sh->dev_cap.max_tso > 0 &&
+                     (sh->dev_cap.tso_supported_qpts &
                       (1 << IBV_QPT_RAW_PACKET)));
        if (config->tso)
-               config->tso_max_payload_sz = sh->device_attr.max_tso;
+               config->tso_max_payload_sz = sh->dev_cap.max_tso;
        /*
         * MPW is disabled by default, while the Enhanced MPW is enabled
         * by default.
@@ -1382,7 +1370,7 @@ err_secondary:
 #endif
        }
        if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
-           !(sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+           !(sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
                DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
                config->cqe_comp = 0;
        }
index 722017efa4d70abe336ea572c576f2d2a32c3376..73c44138de9e1aae839db0e08717d43995afce4b 100644 (file)
@@ -872,13 +872,12 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
        /* CQ to be associated with the receive queue. */
        qp_attr.recv_cq = txq_ctrl->obj->cq;
        /* Max number of outstanding WRs. */
-       qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
-                                  priv->sh->device_attr.max_qp_wr : desc);
+       qp_attr.cap.max_send_wr = RTE_MIN(priv->sh->dev_cap.max_qp_wr, desc);
        /*
         * Max number of scatter/gather elements in a WR, must be 1 to prevent
         * libmlx5 from trying to affect must be 1 to prevent libmlx5 from
         * trying to affect too much memory. TX gather is not impacted by the
-        * device_attr.max_sge limit and will still work properly.
+        * dev_cap.max_sge limit and will still work properly.
         */
        qp_attr.cap.max_send_sge = 1;
        qp_attr.qp_type = IBV_QPT_RAW_PACKET,
index b26632d2499079a132ed70ac556facd447bd6ab5..7487b1f87da3f4fab737f81ba1f52d58f181cf60 100644 (file)
@@ -1262,9 +1262,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
        if (spawn->bond_info)
                sh->bond = *spawn->bond_info;
-       err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
+       err = mlx5_os_capabilities_prepare(sh);
        if (err) {
-               DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
+               DRV_LOG(ERR, "Fail to configure device capabilities.");
                goto error;
        }
        sh->refcnt = 1;
index a713e61572e420bd41f744ffd0ab53e3dcf158b8..fd6350eee73c08113bbae5d8e7905f5db7f295c5 100644 (file)
@@ -114,32 +114,31 @@ struct mlx5_flow_cb_ctx {
        void *data2;
 };
 
-/* Device attributes used in mlx5 PMD */
-struct mlx5_dev_attr {
-       uint64_t        device_cap_flags_ex;
-       int             max_qp_wr;
-       int             max_sge;
-       int             max_cq;
-       int             max_qp;
-       int             max_cqe;
-       uint32_t        max_pd;
-       uint32_t        max_mr;
-       uint32_t        max_srq;
-       uint32_t        max_srq_wr;
-       uint32_t        raw_packet_caps;
-       uint32_t        max_rwq_indirection_table_size;
-       uint32_t        max_tso;
-       uint32_t        tso_supported_qpts;
-       uint64_t        flags;
-       uint64_t        comp_mask;
-       uint32_t        sw_parsing_offloads;
-       uint32_t        min_single_stride_log_num_of_bytes;
-       uint32_t        max_single_stride_log_num_of_bytes;
-       uint32_t        min_single_wqe_log_num_of_strides;
-       uint32_t        max_single_wqe_log_num_of_strides;
-       uint32_t        stride_supported_qpts;
-       uint32_t        tunnel_offloads_caps;
-       char            fw_ver[64];
+/* Device capabilities structure which isn't changed in any stage. */
+struct mlx5_dev_cap {
+       uint64_t device_cap_flags_ex;
+       int max_cq; /* Maximum number of supported CQs */
+       int max_qp; /* Maximum number of supported QPs. */
+       int max_qp_wr; /* Maximum number of outstanding WR on any WQ. */
+       int max_sge;
+       /* Maximum number of s/g per WR for SQ & RQ of QP for non RDMA Read
+        * operations.
+        */
+       uint32_t raw_packet_caps;
+       uint32_t max_rwq_indirection_table_size;
+       /* Maximum receive WQ indirection table size. */
+       uint32_t max_tso; /* Maximum TCP payload for TSO. */
+       uint32_t tso_supported_qpts;
+       uint64_t flags;
+       uint64_t comp_mask;
+       uint32_t sw_parsing_offloads;
+       uint32_t min_single_stride_log_num_of_bytes;
+       uint32_t max_single_stride_log_num_of_bytes;
+       uint32_t min_single_wqe_log_num_of_strides;
+       uint32_t max_single_wqe_log_num_of_strides;
+       uint32_t stride_supported_qpts;
+       uint32_t tunnel_offloads_caps;
+       char fw_ver[64]; /* Firmware version of this device. */
 };
 
 /** Data associated with devices to spawn. */
@@ -1165,7 +1164,7 @@ struct mlx5_dev_ctx_shared {
        uint32_t tdn; /* Transport Domain number. */
        char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
        char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
-       struct mlx5_dev_attr device_attr; /* Device properties. */
+       struct mlx5_dev_cap dev_cap; /* Device capabilities. */
        int numa_node; /* Numa node of backing physical device. */
        /* Packet pacing related structure. */
        struct mlx5_dev_txpp txpp;
@@ -1792,8 +1791,7 @@ void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
 /* mlx5_os.c */
 
 struct rte_pci_driver;
-int mlx5_os_get_dev_attr(struct mlx5_common_device *dev,
-                        struct mlx5_dev_attr *dev_attr);
+int mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh);
 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
 int mlx5_os_net_probe(struct mlx5_common_device *cdev);
 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
index 97c89250448f6c92cf40ae0d1233d2c7485a6210..553df6424d6579955e3aaf1870d6d176b40117e2 100644 (file)
@@ -1305,7 +1305,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
        wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
        /* Create Send Queue object with DevX. */
        wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
-                       (uint32_t)priv->sh->device_attr.max_qp_wr);
+                       (uint32_t)priv->sh->dev_cap.max_qp_wr);
        log_desc_n = log2above(wqe_n);
        ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
        if (ret) {
index 06d5acb75fcc00275f9cecee2a985b6941df20d3..d970eb6904725dd9a0ee327480525145112600ea 100644 (file)
@@ -313,8 +313,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
         * Since we need one CQ per QP, the limit is the minimum number
         * between the two values.
         */
-       max = RTE_MIN(priv->sh->device_attr.max_cq,
-                     priv->sh->device_attr.max_qp);
+       max = RTE_MIN(priv->sh->dev_cap.max_cq, priv->sh->dev_cap.max_qp);
        /* max_rx_queues is uint16_t. */
        max = RTE_MIN(max, (unsigned int)UINT16_MAX);
        info->max_rx_queues = max;
@@ -516,7 +515,7 @@ int
 mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_attr *attr = &priv->sh->device_attr;
+       struct mlx5_dev_cap *attr = &priv->sh->dev_cap;
        size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
 
        if (fw_size < size)
index a00b56eecfaefc87cbbd49b35395baa0f27fc2fa..f46089fd5652b7e98cbdbfa30cbf0a52766c81e6 100644 (file)
@@ -215,10 +215,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                /* Should not release Rx queues but return immediately. */
                return -rte_errno;
        }
-       DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
-               dev->data->port_id, priv->sh->device_attr.max_qp_wr);
-       DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
-               dev->data->port_id, priv->sh->device_attr.max_sge);
+       DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.",
+               dev->data->port_id, priv->sh->dev_cap.max_qp_wr);
+       DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.",
+               dev->data->port_id, priv->sh->dev_cap.max_sge);
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
                struct mlx5_rxq_ctrl *rxq_ctrl;
index 4e0bf7af9ca267316158fc4b0d291b8cd4a2c839..56e0937ca3ee5864fb0a7616e280e606f0d39cc3 100644 (file)
@@ -714,7 +714,7 @@ txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
        struct mlx5_priv *priv = txq_ctrl->priv;
        unsigned int wqe_size;
 
-       wqe_size = priv->sh->device_attr.max_qp_wr / desc;
+       wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
        if (!wqe_size)
                return 0;
        /*
@@ -982,8 +982,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " satisfied (%u) on port %u, try the smaller"
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_mode, max_inline,
-                       priv->dev_data->port_id,
-                       priv->sh->device_attr.max_qp_wr);
+                       priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_send > max_inline &&
@@ -994,8 +993,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " satisfied (%u) on port %u, try the smaller"
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_send, max_inline,
-                       priv->dev_data->port_id,
-                       priv->sh->device_attr.max_qp_wr);
+                       priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_empw > max_inline &&
@@ -1006,8 +1004,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " satisfied (%u) on port %u, try the smaller"
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_empw, max_inline,
-                       priv->dev_data->port_id,
-                       priv->sh->device_attr.max_qp_wr);
+                       priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
@@ -1016,8 +1013,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " satisfied (%u) on port %u, try the smaller"
                        " Tx queue size (%d)",
                        MLX5_MAX_TSO_HEADER, max_inline,
-                       priv->dev_data->port_id,
-                       priv->sh->device_attr.max_qp_wr);
+                       priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_send > max_inline) {
@@ -1098,12 +1094,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        if (txq_adjust_params(tmpl))
                goto error;
        if (txq_calc_wqebb_cnt(tmpl) >
-           priv->sh->device_attr.max_qp_wr) {
+           priv->sh->dev_cap.max_qp_wr) {
                DRV_LOG(ERR,
                        "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
                        " try smaller queue size",
                        dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
-                       priv->sh->device_attr.max_qp_wr);
+                       priv->sh->dev_cap.max_qp_wr);
                rte_errno = ENOMEM;
                goto error;
        }
index e509c3408305fef456be20bd6ec9f7b6656f0b67..91e91e4d36865233297e5f787cf886a65a84717e 100644 (file)
@@ -143,55 +143,42 @@ mlx5_init_once(void)
 }
 
 /**
- * Get mlx5 device attributes.
+ * Get mlx5 device capabilities.
  *
- * @param cdev
- *   Pointer to mlx5 device.
- *
- * @param device_attr
- *   Pointer to mlx5 device attributes.
+ * @param sh
+ *   Pointer to shared device context.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
-                    struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_context *mlx5_ctx;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+       struct mlx5_context *mlx5_ctx = sh->cdev->ctx;
        void *pv_iseg = NULL;
        u32 cb_iseg = 0;
 
-       if (!cdev || !cdev->ctx) {
-               rte_errno = EINVAL;
-               return -rte_errno;
-       }
-       mlx5_ctx = (struct mlx5_context *)cdev->ctx;
-       memset(device_attr, 0, sizeof(*device_attr));
-       device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
-       device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
-       device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
-       device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
-       device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
-       device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
-       device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
-       device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
-       device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap;
-       if (cdev->config.hca_attr.rss_ind_tbl_cap) {
-               device_attr->max_rwq_indirection_table_size =
-                       1 << cdev->config.hca_attr.rss_ind_tbl_cap;
-       }
-       device_attr->sw_parsing_offloads =
-               mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr);
-       device_attr->tunnel_offloads_caps =
-               mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr);
        pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
        if (pv_iseg == NULL) {
-               DRV_LOG(ERR, "Failed to get device hca_iseg");
+               DRV_LOG(ERR, "Failed to get device hca_iseg.");
                rte_errno = errno;
                return -rte_errno;
        }
-       snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
+       memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+       sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
+       sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
+       sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
+       sh->dev_cap.max_tso = 1 << hca_attr->max_lso_cap;
+       if (hca_attr->rss_ind_tbl_cap) {
+               sh->dev_cap.max_rwq_indirection_table_size =
+                       1 << hca_attr->rss_ind_tbl_cap;
+       }
+       sh->dev_cap.sw_parsing_offloads =
+               mlx5_get_supported_sw_parsing_offloads(hca_attr);
+       sh->dev_cap.tunnel_offloads_caps =
+               mlx5_get_supported_tunneling_offloads(hca_attr);
+       snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
                 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
                 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
                 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
@@ -335,12 +322,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        DRV_LOG(DEBUG, "MPW isn't supported");
-       config->swp = sh->device_attr.sw_parsing_offloads &
+       config->swp = sh->dev_cap.sw_parsing_offloads &
                (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
                 MLX5_SW_PARSING_TSO_CAP);
        config->ind_table_max_size =
-               sh->device_attr.max_rwq_indirection_table_size;
-       config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+               sh->dev_cap.max_rwq_indirection_table_size;
+       config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
                (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
                 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
                 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
@@ -410,7 +397,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        }
        DRV_LOG(DEBUG, "counters are not supported");
        config->ind_table_max_size =
-               sh->device_attr.max_rwq_indirection_table_size;
+               sh->dev_cap.max_rwq_indirection_table_size;
        /*
         * Remove this check once DPDK supports larger/variable
         * indirection tables.
@@ -423,9 +410,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
                config->hw_padding = 0;
        }
-       config->tso = (sh->device_attr.max_tso > 0);
+       config->tso = (sh->dev_cap.max_tso > 0);
        if (config->tso)
-               config->tso_max_payload_sz = sh->device_attr.max_tso;
+               config->tso_max_payload_sz = sh->dev_cap.max_tso;
        DRV_LOG(DEBUG, "%sMPS is %s.",
                config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
                config->mps == MLX5_MPW ? "legacy " : "",