]> git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: remove HCA attribute structure duplication
authorMichael Baum <michaelba@nvidia.com>
Mon, 14 Feb 2022 09:34:58 +0000 (11:34 +0200)
committerRaslan Darawsheh <rasland@nvidia.com>
Mon, 21 Feb 2022 10:36:43 +0000 (11:36 +0100)
The HCA attribute structure is a field of the net configuration structure.
It is also a field of the common configuration structure.

There is no need for this duplication, because there is a reference to
the common structure from within the net structures.

This patch removes it from the net configuration structure and uses the
common configuration structure instead.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
13 files changed:
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_flex.c
drivers/net/mlx5/mlx5_flow_meter.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_trigger.c
drivers/net/mlx5/mlx5_txpp.c
drivers/net/mlx5/windows/mlx5_os.c

index 191da1bee928fde1ab76192da132f38fb06cb8b0..b3ee1f7dc4ca8752f51e6794950850baea4fb4c7 100644 (file)
@@ -675,6 +675,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
        bool fallback;
 
 #ifndef HAVE_IBV_DEVX_ASYNC
@@ -682,16 +683,16 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 #else
        fallback = false;
        if (!sh->devx || !priv->config.dv_flow_en ||
-           !priv->config.hca_attr.flow_counters_dump ||
-           !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
+           !hca_attr->flow_counters_dump ||
+           !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
            (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
                fallback = true;
 #endif
        if (fallback)
                DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
                        "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
-                       priv->config.hca_attr.flow_counters_dump,
-                       priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
+                       hca_attr->flow_counters_dump,
+                       hca_attr->flow_counter_bulk_alloc_bitmap);
        /* Initialize fallback mode only on the port initializes sh. */
        if (sh->refcnt == 1)
                sh->cmng.counter_fallback = fallback;
@@ -875,6 +876,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 {
        const struct mlx5_switch_info *switch_info = &spawn->info;
        struct mlx5_dev_ctx_shared *sh = NULL;
+       struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
        struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };
        struct rte_eth_dev *eth_dev = NULL;
@@ -990,7 +992,7 @@ err_secondary:
        }
 #endif
 #ifdef HAVE_MLX5DV_DR_ESWITCH
-       if (!(sh->cdev->config.hca_attr.eswitch_manager && config->dv_flow_en &&
+       if (!(hca_attr->eswitch_manager && config->dv_flow_en &&
              (switch_info->representor || switch_info->master)))
                config->dv_esw_en = 0;
 #else
@@ -1315,14 +1317,12 @@ err_secondary:
                config->mps == MLX5_MPW ? "legacy " : "",
                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
        if (sh->devx) {
-               config->hca_attr = sh->cdev->config.hca_attr;
-               sh->steering_format_version =
-                       config->hca_attr.steering_format_version;
+               sh->steering_format_version = hca_attr->steering_format_version;
                /* Check for LRO support. */
-               if (config->dest_tir && config->hca_attr.lro_cap &&
+               if (config->dest_tir && hca_attr->lro_cap &&
                    config->dv_flow_en) {
                        /* TBD check tunnel lro caps. */
-                       config->lro.supported = config->hca_attr.lro_cap;
+                       config->lro.supported = hca_attr->lro_cap;
                        DRV_LOG(DEBUG, "Device supports LRO");
                        /*
                         * If LRO timeout is not configured by application,
@@ -1330,21 +1330,19 @@ err_secondary:
                         */
                        if (!config->lro.timeout)
                                config->lro.timeout =
-                               config->hca_attr.lro_timer_supported_periods[0];
+                                      hca_attr->lro_timer_supported_periods[0];
                        DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
                                config->lro.timeout);
                        DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
                                "required for coalescing is %d bytes",
-                               config->hca_attr.lro_min_mss_size);
+                               hca_attr->lro_min_mss_size);
                }
 #if defined(HAVE_MLX5DV_DR) && \
        (defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
         defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
-               if (config->hca_attr.qos.sup &&
-                   config->hca_attr.qos.flow_meter_old &&
+               if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
                    config->dv_flow_en) {
-                       uint8_t reg_c_mask =
-                               config->hca_attr.qos.flow_meter_reg_c_ids;
+                       uint8_t reg_c_mask = hca_attr->qos.flow_meter_reg_c_ids;
                        /*
                         * Meter needs two REG_C's for color match and pre-sfx
                         * flow match. Here get the REG_C for color match.
@@ -1368,20 +1366,18 @@ err_secondary:
                                        priv->mtr_color_reg = ffs(reg_c_mask)
                                                              - 1 + REG_C_0;
                                priv->mtr_en = 1;
-                               priv->mtr_reg_share =
-                                     config->hca_attr.qos.flow_meter;
+                               priv->mtr_reg_share = hca_attr->qos.flow_meter;
                                DRV_LOG(DEBUG, "The REG_C meter uses is %d",
                                        priv->mtr_color_reg);
                        }
                }
-               if (config->hca_attr.qos.sup &&
-                       config->hca_attr.qos.flow_meter_aso_sup) {
+               if (hca_attr->qos.sup && hca_attr->qos.flow_meter_aso_sup) {
                        uint32_t log_obj_size =
                                rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
                        if (log_obj_size >=
-                       config->hca_attr.qos.log_meter_aso_granularity &&
-                       log_obj_size <=
-                       config->hca_attr.qos.log_meter_aso_max_alloc)
+                           hca_attr->qos.log_meter_aso_granularity &&
+                           log_obj_size <=
+                           hca_attr->qos.log_meter_aso_max_alloc)
                                sh->meter_aso_en = 1;
                }
                if (priv->mtr_en) {
@@ -1391,12 +1387,11 @@ err_secondary:
                                goto error;
                        }
                }
-               if (config->hca_attr.flow.tunnel_header_0_1)
+               if (hca_attr->flow.tunnel_header_0_1)
                        sh->tunnel_header_0_1 = 1;
 #endif
 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
-               if (config->hca_attr.flow_hit_aso &&
-                   priv->mtr_color_reg == REG_C_3) {
+               if (hca_attr->flow_hit_aso && priv->mtr_color_reg == REG_C_3) {
                        sh->flow_hit_aso_en = 1;
                        err = mlx5_flow_aso_age_mng_init(sh);
                        if (err) {
@@ -1408,8 +1403,7 @@ err_secondary:
 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
 #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
        defined(HAVE_MLX5_DR_ACTION_ASO_CT)
-               if (config->hca_attr.ct_offload &&
-                   priv->mtr_color_reg == REG_C_3) {
+               if (hca_attr->ct_offload && priv->mtr_color_reg == REG_C_3) {
                        err = mlx5_flow_aso_ct_mng_init(sh);
                        if (err) {
                                err = -err;
@@ -1420,13 +1414,13 @@ err_secondary:
                }
 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
-               if (config->hca_attr.log_max_ft_sampler_num > 0  &&
+               if (hca_attr->log_max_ft_sampler_num > 0  &&
                    config->dv_flow_en) {
                        priv->sampler_en = 1;
                        DRV_LOG(DEBUG, "Sampler enabled!");
                } else {
                        priv->sampler_en = 0;
-                       if (!config->hca_attr.log_max_ft_sampler_num)
+                       if (!hca_attr->log_max_ft_sampler_num)
                                DRV_LOG(WARNING,
                                        "No available register for sampler.");
                        else
@@ -1440,13 +1434,13 @@ err_secondary:
                config->cqe_comp = 0;
        }
        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
-           (!sh->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
+           (!sh->devx || !hca_attr->mini_cqe_resp_flow_tag)) {
                DRV_LOG(WARNING, "Flow Tag CQE compression"
                                 " format isn't supported.");
                config->cqe_comp = 0;
        }
        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
-           (!sh->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
+           (!sh->devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
                DRV_LOG(WARNING, "L3/L4 Header CQE compression"
                                 " format isn't supported.");
                config->cqe_comp = 0;
@@ -1455,55 +1449,55 @@ err_secondary:
                        config->cqe_comp ? "" : "not ");
        if (config->tx_pp) {
                DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
-                       config->hca_attr.dev_freq_khz);
+                       hca_attr->dev_freq_khz);
                DRV_LOG(DEBUG, "Packet pacing is %ssupported",
-                       config->hca_attr.qos.packet_pacing ? "" : "not ");
+                       hca_attr->qos.packet_pacing ? "" : "not ");
                DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
-                       config->hca_attr.cross_channel ? "" : "not ");
+                       hca_attr->cross_channel ? "" : "not ");
                DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
-                       config->hca_attr.wqe_index_ignore ? "" : "not ");
+                       hca_attr->wqe_index_ignore ? "" : "not ");
                DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
-                       config->hca_attr.non_wire_sq ? "" : "not ");
+                       hca_attr->non_wire_sq ? "" : "not ");
                DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
-                       config->hca_attr.log_max_static_sq_wq ? "" : "not ",
-                       config->hca_attr.log_max_static_sq_wq);
+                       hca_attr->log_max_static_sq_wq ? "" : "not ",
+                       hca_attr->log_max_static_sq_wq);
                DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
-                       config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
+                       hca_attr->qos.wqe_rate_pp ? "" : "not ");
                if (!sh->devx) {
                        DRV_LOG(ERR, "DevX is required for packet pacing");
                        err = ENODEV;
                        goto error;
                }
-               if (!config->hca_attr.qos.packet_pacing) {
+               if (!hca_attr->qos.packet_pacing) {
                        DRV_LOG(ERR, "Packet pacing is not supported");
                        err = ENODEV;
                        goto error;
                }
-               if (!config->hca_attr.cross_channel) {
+               if (!hca_attr->cross_channel) {
                        DRV_LOG(ERR, "Cross channel operations are"
                                     " required for packet pacing");
                        err = ENODEV;
                        goto error;
                }
-               if (!config->hca_attr.wqe_index_ignore) {
+               if (!hca_attr->wqe_index_ignore) {
                        DRV_LOG(ERR, "WQE index ignore feature is"
                                     " required for packet pacing");
                        err = ENODEV;
                        goto error;
                }
-               if (!config->hca_attr.non_wire_sq) {
+               if (!hca_attr->non_wire_sq) {
                        DRV_LOG(ERR, "Non-wire SQ feature is"
                                     " required for packet pacing");
                        err = ENODEV;
                        goto error;
                }
-               if (!config->hca_attr.log_max_static_sq_wq) {
+               if (!hca_attr->log_max_static_sq_wq) {
                        DRV_LOG(ERR, "Static WQE SQ feature is"
                                     " required for packet pacing");
                        err = ENODEV;
                        goto error;
                }
-               if (!config->hca_attr.qos.wqe_rate_pp) {
+               if (!hca_attr->qos.wqe_rate_pp) {
                        DRV_LOG(ERR, "WQE rate mode is required"
                                     " for packet pacing");
                        err = ENODEV;
@@ -1517,7 +1511,7 @@ err_secondary:
 #endif
        }
        if (config->std_delay_drop || config->hp_delay_drop) {
-               if (!config->hca_attr.rq_delay_drop) {
+               if (!hca_attr->rq_delay_drop) {
                        config->std_delay_drop = 0;
                        config->hp_delay_drop = 0;
                        DRV_LOG(WARNING,
@@ -1528,7 +1522,7 @@ err_secondary:
        if (sh->devx) {
                uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
 
-               err = config->hca_attr.access_register_user ?
+               err = hca_attr->access_register_user ?
                        mlx5_devx_cmd_register_read
                                (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
                                reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
@@ -1542,8 +1536,7 @@ err_secondary:
                                config->rt_timestamp = 1;
                } else {
                        /* Kernel does not support register reading. */
-                       if (config->hca_attr.dev_freq_khz ==
-                                                (NS_PER_S / MS_PER_S))
+                       if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
                                config->rt_timestamp = 1;
                }
        }
@@ -1552,7 +1545,7 @@ err_secondary:
         * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
         * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
         */
-       if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
+       if (hca_attr->scatter_fcs_w_decap_disable && config->decap_en)
                config->hw_fcs_strip = 0;
        DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
                (config->hw_fcs_strip ? "" : "not "));
index d1215e56279cd53ea8aff95f192a6b06b9cfe187..a713391268db8da69f8a8bcfcad6df9d896a1c17 100644 (file)
@@ -889,7 +889,7 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
        uint32_t ids[8];
        int ret;
 
-       if (!priv->config.hca_attr.parse_graph_flex_node) {
+       if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
                DRV_LOG(ERR, "Dynamic flex parser is not supported "
                        "for device %s.", priv->dev_data->name);
                return -ENOTSUP;
@@ -2035,6 +2035,8 @@ void
 mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                    struct mlx5_dev_config *config)
 {
+       struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+
        if (config->txq_inline_min != MLX5_ARG_UNSET) {
                /* Application defines size of inlined data explicitly. */
                if (spawn->pci_dev != NULL) {
@@ -2054,9 +2056,9 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                }
                goto exit;
        }
-       if (config->hca_attr.eth_net_offloads) {
+       if (hca_attr->eth_net_offloads) {
                /* We have DevX enabled, inline mode queried successfully. */
-               switch (config->hca_attr.wqe_inline_mode) {
+               switch (hca_attr->wqe_inline_mode) {
                case MLX5_CAP_INLINE_MODE_L2:
                        /* outer L2 header must be inlined. */
                        config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
@@ -2065,14 +2067,14 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
                        /* No inline data are required by NIC. */
                        config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
                        config->hw_vlan_insert =
-                               config->hca_attr.wqe_vlan_insert;
+                               hca_attr->wqe_vlan_insert;
                        DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
                        goto exit;
                case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                        /* inline mode is defined by NIC vport context. */
-                       if (!config->hca_attr.eth_virt)
+                       if (!hca_attr->eth_virt)
                                break;
-                       switch (config->hca_attr.vport_inline_mode) {
+                       switch (hca_attr->vport_inline_mode) {
                        case MLX5_INLINE_MODE_NONE:
                                config->txq_inline_min =
                                        MLX5_INLINE_HSIZE_NONE;
index e4b2523eb0b1f5ec739a07ef43f28cf24345b8b7..ee485343ffe6e4f8774bad40735345d4453b0b97 100644 (file)
@@ -299,7 +299,6 @@ struct mlx5_dev_config {
        int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
        int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
        int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
-       struct mlx5_hca_attr hca_attr; /* HCA attributes. */
        struct mlx5_lro_config lro; /* LRO configuration. */
 };
 
index 91243f684fb08d61dc8fedf87ae2fe07d452934b..97c89250448f6c92cf40ae0d1233d2c7485a6210 100644 (file)
@@ -419,7 +419,8 @@ mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
        MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
        tmpl->rxq_ctrl = rxq_ctrl;
        attr.hairpin = 1;
-       max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+       max_wq_data =
+               priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, and more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
@@ -1117,7 +1118,8 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
        tmpl->txq_ctrl = txq_ctrl;
        attr.hairpin = 1;
        attr.tis_lst_sz = 1;
-       max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+       max_wq_data =
+               priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, and more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
@@ -1193,7 +1195,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
        struct mlx5_devx_create_sq_attr sq_attr = {
                .flush_in_error_en = 1,
                .allow_multi_pkt_send_wqe = !!priv->config.mps,
-               .min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
+               .min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
                .allow_swp = !!priv->config.swp,
                .cqn = txq_obj->cq_obj.cq->id,
                .tis_lst_sz = 1,
index dc647d5580c076139536c7dc8b526a7c9e57c570..5b0eee3321c90110e4eed8da6da95e28ed583545 100644 (file)
@@ -337,7 +337,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
        mlx5_set_default_params(dev, info);
        mlx5_set_txlimit_params(dev, info);
-       if (priv->config.hca_attr.mem_rq_rmp &&
+       if (priv->sh->cdev->config.hca_attr.mem_rq_rmp &&
            priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new)
                info->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE;
        info->switch_info.name = dev->data->name;
index 179cc3b303783e38f342e0e4e8a1a48d2f27490a..29b4516709280a6c3438b8b0deceeb321acf9391 100644 (file)
@@ -2906,7 +2906,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        const struct rte_flow_item_geneve *mask = item->mask;
        int ret;
        uint16_t gbhdr;
-       uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
+       uint8_t opt_len = priv->sh->cdev->config.hca_attr.geneve_max_opt_len ?
                          MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
        const struct rte_flow_item_geneve nic_mask = {
                .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
@@ -2914,7 +2914,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
                .protocol = RTE_BE16(UINT16_MAX),
        };
 
-       if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
+       if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_geneve_rx)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 Geneve is not enabled by device"
@@ -2994,10 +2994,9 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
-       struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
        uint8_t data_max_supported =
                        hca_attr->max_geneve_tlv_option_data_len * 4;
-       struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;
        const struct rte_flow_item_geneve_opt *spec = item->spec;
@@ -3031,11 +3030,11 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                        "Geneve TLV opt class/type/length masks must be full");
        /* Check if length is supported */
        if ((uint32_t)spec->option_len >
-                       config->hca_attr.max_geneve_tlv_option_data_len)
+                       hca_attr->max_geneve_tlv_option_data_len)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt length not supported");
-       if (config->hca_attr.max_geneve_tlv_options > 1)
+       if (hca_attr->max_geneve_tlv_options > 1)
                DRV_LOG(DEBUG,
                        "max_geneve_tlv_options supports more than 1 option");
        /* Check GENEVE item preceding. */
@@ -3090,7 +3089,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                                        "Data mask is of unsupported size");
        }
        /* Check GENEVE option is supported in NIC. */
-       if (!config->hca_attr.geneve_tlv_opt)
+       if (!hca_attr->geneve_tlv_opt)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Geneve TLV opt not supported");
@@ -6249,7 +6248,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
                 * When reg_c_preserve is set, metadata registers Cx preserve
                 * their value even through packet duplication.
                 */
-               add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
+               add_tag = (!fdb_tx ||
+                          priv->sh->cdev->config.hca_attr.reg_c_preserve);
                if (add_tag)
                        sfx_items = (struct rte_flow_item *)((char *)sfx_actions
                                        + act_size);
index ef9c66eddf061e6d2de0be52772c343bf9b41e56..b0ed9f93a0a10a1ea6ced32f13e68d48f41114e2 100644 (file)
@@ -2317,7 +2317,7 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
                .teid = RTE_BE32(0xffffffff),
        };
 
-       if (!priv->config.hca_attr.tunnel_stateless_gtp)
+       if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "GTP support is not enabled");
@@ -2426,6 +2426,7 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
 {
        int ret;
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *last = item->last;
        const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -2444,8 +2445,8 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
 
        if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
-               bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
-                              priv->config.hca_attr.inner_ipv4_ihl;
+               bool ihl_cap = !tunnel ?
+                              attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
                if (!ihl_cap)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3384,7 +3385,7 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev,
 {
        const struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
+       if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
            !priv->config.decap_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -5753,7 +5754,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                                                  NULL,
                                                  "E-Switch must has a dest "
                                                  "port for mirroring");
-               if (!priv->config.hca_attr.reg_c_preserve &&
+               if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
                     priv->representor_id != UINT16_MAX)
                        *fdb_mirror_limit = 1;
        }
@@ -6686,7 +6687,7 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
        const struct rte_flow_item_integrity *spec = (typeof(spec))
                                                     integrity_item->spec;
 
-       if (!priv->config.hca_attr.pkt_integrity_match)
+       if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          integrity_item,
index 9413d4d81725f505d994d61344d249f12d3816fb..26f0dfa36f7710d608a05014c6c345a0aade9c65 100644 (file)
@@ -910,7 +910,7 @@ mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
         * offsets in any order.
         *
         * Gather all similar fields together, build array of bit intervals
-        * in asсending order and try to cover with the smallest set of sample
+        * in ascending order and try to cover with the smallest set of sample
         * registers.
         */
        memset(&cover, 0, sizeof(cover));
@@ -1153,7 +1153,7 @@ mlx5_flex_translate_conf(struct rte_eth_dev *dev,
                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
+       struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
        int ret;
 
        ret = mlx5_flex_translate_length(attr, conf, devx, error);
index 4f5de5e4226619de12c8a6440d07160c0c9522ed..2310ea6a86f51efaaa1019d2081cd7efebd34c77 100644 (file)
@@ -155,7 +155,7 @@ mlx5_flow_meter_profile_validate(struct rte_eth_dev *dev,
                                          "Meter profile already exists.");
        if (!priv->sh->meter_aso_en) {
                /* Old version is even not supported. */
-               if (!priv->config.hca_attr.qos.flow_meter_old)
+               if (!priv->sh->cdev->config.hca_attr.qos.flow_meter_old)
                        return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_PROFILE,
                                NULL, "Metering is not supported.");
@@ -428,7 +428,7 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev,
                 struct rte_mtr_error *error __rte_unused)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_hca_qos_attr *qattr = &priv->config.hca_attr.qos;
+       struct mlx5_hca_qos_attr *qattr = &priv->sh->cdev->config.hca_attr.qos;
 
        if (!priv->mtr_en)
                return -rte_mtr_error_set(error, ENOTSUP,
index 580d7ae868139b88f83df1ccf97c4b2692e77538..0ede46aa43c3c4baa710511164a144e4149e455d 100644 (file)
@@ -863,7 +863,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
        }
        if (conf->share_group > 0) {
-               if (!priv->config.hca_attr.mem_rq_rmp) {
+               if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
                        DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
                                     dev->data->port_id, idx);
                        rte_errno = EINVAL;
@@ -1517,7 +1517,7 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+       if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
            MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
            MLX5_MAX_TCP_HDR_OFFSET)
                max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
index 3a59237b1a7a8c6a67549878c556dcb05b36cf9b..0418ce2fafde65e031c4f255fcc75d59677c7026 100644 (file)
@@ -341,14 +341,16 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                sq_attr.state = MLX5_SQC_STATE_RDY;
                sq_attr.sq_state = MLX5_SQC_STATE_RST;
                sq_attr.hairpin_peer_rq = rq->id;
-               sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+               sq_attr.hairpin_peer_vhca =
+                               priv->sh->cdev->config.hca_attr.vhca_id;
                ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
                if (ret)
                        goto error;
                rq_attr.state = MLX5_SQC_STATE_RDY;
                rq_attr.rq_state = MLX5_SQC_STATE_RST;
                rq_attr.hairpin_peer_sq = sq->id;
-               rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+               rq_attr.hairpin_peer_vhca =
+                               priv->sh->cdev->config.hca_attr.vhca_id;
                ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
                if (ret)
                        goto error;
@@ -425,7 +427,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
                        return -rte_errno;
                }
                peer_info->qp_id = txq_ctrl->obj->sq->id;
-               peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+               peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
                /* 1-to-1 mapping, only the first one is used. */
                peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
                peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
@@ -455,7 +457,7 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
                        return -rte_errno;
                }
                peer_info->qp_id = rxq_ctrl->obj->rq->id;
-               peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+               peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
                peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
                peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
                peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
@@ -817,7 +819,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
                /* Pass TxQ's information to peer RxQ and try binding. */
                cur.peer_q = rx_queue;
                cur.qp_id = txq_ctrl->obj->sq->id;
-               cur.vhca_id = priv->config.hca_attr.vhca_id;
+               cur.vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
                cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
                cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
                /*
index af77e91e4cbbc9ed5176bf0731b0ff61ab2a9b36..1d16ebcb41e1c745f60570c8109bc5bff4e3f43e 100644 (file)
@@ -825,7 +825,7 @@ mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
        sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
        sh->txpp.test = !!(tx_pp < 0);
        sh->txpp.skew = priv->config.tx_skew;
-       sh->txpp.freq = priv->config.hca_attr.dev_freq_khz;
+       sh->txpp.freq = sh->cdev->config.hca_attr.dev_freq_khz;
        ret = mlx5_txpp_create_event_channel(sh);
        if (ret)
                goto exit;
index 0966da10f4342961539433cdffb834dc07c448f2..07a9583cab2299d1d8625073389bdf7b654498bb 100644 (file)
@@ -268,6 +268,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
        bool fallback;
 
 #ifndef HAVE_IBV_DEVX_ASYNC
@@ -275,16 +276,16 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 #else
        fallback = false;
        if (!sh->devx || !priv->config.dv_flow_en ||
-           !priv->config.hca_attr.flow_counters_dump ||
-           !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
+           !hca_attr->flow_counters_dump ||
+           !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
            (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
                fallback = true;
 #endif
        if (fallback)
                DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
                        "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
-                       priv->config.hca_attr.flow_counters_dump,
-                       priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
+                       hca_attr->flow_counters_dump,
+                       hca_attr->flow_counter_bulk_alloc_bitmap);
        /* Initialize fallback mode only on the port initializes sh. */
        if (sh->refcnt == 1)
                sh->cmng.counter_fallback = fallback;
@@ -318,6 +319,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        const struct mlx5_switch_info *switch_info = &spawn->info;
        struct mlx5_dev_ctx_shared *sh = NULL;
        struct mlx5_dev_attr device_attr;
+       struct mlx5_hca_attr *hca_attr;
        struct rte_eth_dev *eth_dev = NULL;
        struct mlx5_priv *priv = NULL;
        int err = 0;
@@ -475,19 +477,19 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                config->cqe_comp = 0;
        }
        if (sh->devx) {
-               config->hca_attr = sh->cdev->config.hca_attr;
-               config->hw_csum = config->hca_attr.csum_cap;
+               hca_attr = &sh->cdev->config.hca_attr;
+               config->hw_csum = hca_attr->csum_cap;
                DRV_LOG(DEBUG, "checksum offloading is %ssupported",
-                   (config->hw_csum ? "" : "not "));
-               config->hw_vlan_strip = config->hca_attr.vlan_cap;
+                       (config->hw_csum ? "" : "not "));
+               config->hw_vlan_strip = hca_attr->vlan_cap;
                DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
                        (config->hw_vlan_strip ? "" : "not "));
-               config->hw_fcs_strip = config->hca_attr.scatter_fcs;
+               config->hw_fcs_strip = hca_attr->scatter_fcs;
        }
        if (sh->devx) {
                uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
 
-               err = config->hca_attr.access_register_user ?
+               err = hca_attr->access_register_user ?
                        mlx5_devx_cmd_register_read
                                (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
                                reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
@@ -501,8 +503,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                                config->rt_timestamp = 1;
                } else {
                        /* Kernel does not support register reading. */
-                       if (config->hca_attr.dev_freq_khz ==
-                                                (NS_PER_S / MS_PER_S))
+                       if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
                                config->rt_timestamp = 1;
                }
        }