common/mlx5: share VF check function

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index b3ee1f7..d1bf899 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -171,6 +171,15 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
        device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
 
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+       dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+       dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
+#endif
        err = mlx5_glue->dv_query_device(ctx, &dv_attr);
        if (err) {
                rte_errno = errno;
@@ -183,6 +192,7 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
        device_attr->sw_parsing_offloads =
                dv_attr.sw_parsing_caps.sw_parsing_offloads;
 #endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
        device_attr->min_single_stride_log_num_of_bytes =
                dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
        device_attr->max_single_stride_log_num_of_bytes =
@@ -193,6 +203,7 @@ mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
                dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
        device_attr->stride_supported_qpts =
                dv_attr.striding_rq_caps.supported_qpts;
+#endif
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
 #endif
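
mlx5_os_get_dev_attr() now owns the whole DV capability query: the comp_mask
bits are requested and the results copied into the shared attribute cache
under the same HAVE_* guards. A minimal sketch of the cached block, as
reconstructed only from the assignments in this patch (the real
struct mlx5_dev_attr in mlx5.h may hold more fields in a different order):

	/* Hypothetical sketch, not the upstream definition. */
	struct mlx5_dev_attr_sketch {
		uint64_t comp_mask;	/* copy of dv_attr.comp_mask */
		uint64_t flags;		/* MLX5DV_CONTEXT_FLAGS_* */
		uint32_t sw_parsing_offloads;	/* HAVE_IBV_MLX5_MOD_SWP */
		uint32_t min_single_stride_log_num_of_bytes;	/* striding RQ */
		uint32_t max_single_stride_log_num_of_bytes;
		uint32_t min_single_wqe_log_num_of_strides;
		uint32_t max_single_wqe_log_num_of_strides;
		uint32_t stride_supported_qpts;
		uint32_t tunnel_offloads_caps;	/* HAVE_IBV_DEVICE_TUNNEL_SUPPORT */
	};
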
@@ -682,7 +693,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
        fallback = true;
 #else
        fallback = false;
-       if (!sh->devx || !priv->config.dv_flow_en ||
+       if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
            !hca_attr->flow_counters_dump ||
            !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
            (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
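
Every sh->devx test in this file becomes sh->cdev->config.devx: the DevX
indication is kept once in the common-device configuration instead of being
duplicated in each shared ethdev context. Assumed shape, inferred only from
the accesses in this patch (the real definitions live under
drivers/common/mlx5/):

	/* Simplified sketch; only the members this patch touches. */
	struct mlx5_common_dev_config {
		unsigned int devx:1;	/* DevX interface is available. */
	};
	struct mlx5_common_device {
		struct mlx5_common_dev_config config;
	};
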
@@ -878,7 +889,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        struct mlx5_dev_ctx_shared *sh = NULL;
        struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
        struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
-       struct mlx5dv_context dv_attr = { .comp_mask = 0 };
        struct rte_eth_dev *eth_dev = NULL;
        struct mlx5_priv *priv = NULL;
        int err = 0;
@@ -1011,23 +1021,13 @@ err_secondary:
                goto error;
 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
        config->dest_tir = 1;
-#endif
-#ifdef HAVE_IBV_MLX5_MOD_SWP
-       dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
 #endif
        /*
         * Multi-packet send is supported by ConnectX-4 Lx PF as well
         * as all ConnectX-5 devices.
         */
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
-#endif
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-       dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
-#endif
-       mlx5_glue->dv_query_device(sh->cdev->ctx, &dv_attr);
-       if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
-               if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+       if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+               if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
                        DRV_LOG(DEBUG, "enhanced MPW is supported");
                        mps = MLX5_MPW_ENHANCED;
                } else {
@@ -1039,44 +1039,41 @@ err_secondary:
                mps = MLX5_MPW_DISABLED;
        }
 #ifdef HAVE_IBV_MLX5_MOD_SWP
-       if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
-               swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
+       if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+               swp = sh->device_attr.sw_parsing_offloads;
        DRV_LOG(DEBUG, "SWP support: %u", swp);
 #endif
        config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
                MLX5_SW_PARSING_TSO_CAP);
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-       if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
-               struct mlx5dv_striding_rq_caps mprq_caps =
-                       dv_attr.striding_rq_caps;
-
+       if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
                DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
-                       mprq_caps.min_single_stride_log_num_of_bytes);
+                       sh->device_attr.min_single_stride_log_num_of_bytes);
                DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
-                       mprq_caps.max_single_stride_log_num_of_bytes);
+                       sh->device_attr.max_single_stride_log_num_of_bytes);
                DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
-                       mprq_caps.min_single_wqe_log_num_of_strides);
+                       sh->device_attr.min_single_wqe_log_num_of_strides);
                DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
-                       mprq_caps.max_single_wqe_log_num_of_strides);
+                       sh->device_attr.max_single_wqe_log_num_of_strides);
                DRV_LOG(DEBUG, "\tsupported_qpts: %d",
-                       mprq_caps.supported_qpts);
+                       sh->device_attr.stride_supported_qpts);
                DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
                        config->mprq.log_min_stride_wqe_size);
                DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
                mprq = 1;
                config->mprq.log_min_stride_size =
-                       mprq_caps.min_single_stride_log_num_of_bytes;
+                       sh->device_attr.min_single_stride_log_num_of_bytes;
                config->mprq.log_max_stride_size =
-                       mprq_caps.max_single_stride_log_num_of_bytes;
+                       sh->device_attr.max_single_stride_log_num_of_bytes;
                config->mprq.log_min_stride_num =
-                       mprq_caps.min_single_wqe_log_num_of_strides;
+                       sh->device_attr.min_single_wqe_log_num_of_strides;
                config->mprq.log_max_stride_num =
-                       mprq_caps.max_single_wqe_log_num_of_strides;
+                       sh->device_attr.max_single_wqe_log_num_of_strides;
        }
 #endif
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
-               config->tunnel_en = dv_attr.tunnel_offloads_caps &
+       if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+               config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
                             (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
                              MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
                              MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
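
With the local dv_attr gone, all of the MPW/SWP/MPRQ/tunnel probing above
reads the attributes cached in sh->device_attr, so spawn no longer calls
dv_query_device() per port. Presumably the shared-context allocator fills
that cache once via the helper extended at the top of this patch, roughly:

	/* Sketch; mlx5_alloc_shared_dev_ctx() in mlx5.c is the expected caller. */
	err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
		goto error;
	}
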
@@ -1098,9 +1095,9 @@ err_secondary:
                "tunnel offloading disabled due to old OFED/rdma-core version");
 #endif
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
-       mpls_en = ((dv_attr.tunnel_offloads_caps &
+       mpls_en = ((sh->device_attr.tunnel_offloads_caps &
                    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
-                  (dv_attr.tunnel_offloads_caps &
+                  (sh->device_attr.tunnel_offloads_caps &
                    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
        DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
                mpls_en ? "" : "not ");
@@ -1316,7 +1313,7 @@ err_secondary:
                config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
                config->mps == MLX5_MPW ? "legacy " : "",
                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-       if (sh->devx) {
+       if (sh->cdev->config.devx) {
                sh->steering_format_version = hca_attr->steering_format_version;
                /* Check for LRO support. */
                if (config->dest_tir && hca_attr->lro_cap &&
@@ -1429,18 +1426,18 @@ err_secondary:
 #endif
        }
        if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
-           !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+           !(sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
                DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
                config->cqe_comp = 0;
        }
        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
-           (!sh->devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+           (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_flow_tag)) {
                DRV_LOG(WARNING, "Flow Tag CQE compression"
                                 " format isn't supported.");
                config->cqe_comp = 0;
        }
        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
-           (!sh->devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+           (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
                DRV_LOG(WARNING, "L3/L4 Header CQE compression"
                                 " format isn't supported.");
                config->cqe_comp = 0;
@@ -1463,7 +1460,7 @@ err_secondary:
                        hca_attr->log_max_static_sq_wq);
                DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
                        hca_attr->qos.wqe_rate_pp ? "" : "not ");
-               if (!sh->devx) {
+               if (!sh->cdev->config.devx) {
                        DRV_LOG(ERR, "DevX is required for packet pacing");
                        err = ENODEV;
                        goto error;
@@ -1519,7 +1516,7 @@ err_secondary:
                                priv->dev_port);
                }
        }
-       if (sh->devx) {
+       if (sh->cdev->config.devx) {
                uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
 
                err = hca_attr->access_register_user ?
@@ -1676,7 +1673,7 @@ err_secondary:
                if (mlx5_flex_item_port_init(eth_dev) < 0)
                        goto error;
        }
-       if (sh->devx && config->dv_flow_en && config->dest_tir) {
+       if (sh->cdev->config.devx && config->dv_flow_en && config->dest_tir) {
                priv->obj_ops = devx_obj_ops;
                mlx5_queue_counter_id_prepare(eth_dev);
                priv->obj_ops.lb_dummy_queue_create =
@@ -2103,7 +2100,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
        struct mlx5_dev_spawn_data *list = NULL;
        struct mlx5_dev_config dev_config;
-       unsigned int dev_config_vf;
        struct rte_eth_devargs eth_da = *req_eth_da;
        struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
        struct mlx5_bond_info bond_info;
@@ -2424,21 +2420,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
         * (i.e. master first, then representors from lowest to highest ID).
         */
        qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
-       /* Device specific configuration. */
-       switch (pci_dev->id.device_id) {
-       case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
-       case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
-       case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
-       case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
-       case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
-       case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
-       case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
-               dev_config_vf = 1;
-               break;
-       default:
-               dev_config_vf = 0;
-               break;
-       }
        if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
                /* Set devargs default values. */
                if (eth_da.nb_mh_controllers == 0) {
@@ -2462,7 +2443,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
 
                /* Default configuration. */
                mlx5_os_config_default(&dev_config, &cdev->config);
-               dev_config.vf = dev_config_vf;
+               dev_config.vf = mlx5_dev_is_vf_pci(pci_dev);
                list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
                                                 &dev_config, &eth_da);
                if (!list[i].eth_dev) {
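
The open-coded switch over VF PCI device IDs is replaced by
mlx5_dev_is_vf_pci(), shared with the Windows probing path. From the deleted
lines, the shared helper is presumably equivalent to the following (its
expected home is drivers/common/mlx5/; the exact signature is an assumption):

	/* Reconstruction of the deleted switch as a shared predicate. */
	bool
	mlx5_dev_is_vf_pci(struct rte_pci_device *pci_dev)
	{
		switch (pci_dev->id.device_id) {
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
			return true;
		default:
			break;
		}
		return false;
	}
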
@@ -2735,7 +2716,7 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
                        rte_intr_fd_set(sh->intr_handle, -1);
                }
        }
-       if (sh->devx) {
+       if (sh->cdev->config.devx) {
 #ifdef HAVE_IBV_DEVX_ASYNC
                sh->intr_handle_devx =
                        rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);