net/mlx5: check FW miniCQE format capabilities
author Alexander Kozyrev <akozyrev@nvidia.com>
Tue, 2 Feb 2021 02:07:37 +0000 (02:07 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Thu, 4 Feb 2021 17:19:36 +0000 (18:19 +0100)
miniCQE formats for Flow Tag and L3/L4 Header compression are only
supported by Mellanox FW starting from version 16.29.392. There is
no point in allowing a user to enable these formats if the FW cannot
provide them. Check the FW capabilities and reject the user request
if the selected miniCQE format is not supported by the underlying NIC.

Fixes: 54c2d46b160f ("net/mlx5: support flow tag and packet header miniCQEs")
Cc: stable@dpdk.org
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
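
The gist of the change, as a minimal standalone sketch (the struct, enum and
function names below are hypothetical and are not the mlx5 driver's API): the
FW capability bits gate the requested miniCQE format, and an unsupported
request falls back to disabling Rx CQE compression.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the FW capability bits reported by the HCA. */
struct fw_caps {
	uint32_t cqe_compression:1;
	uint32_t mini_cqe_resp_flow_tag:1;
	uint32_t mini_cqe_resp_l3_l4_tag:1;
};

/* Hypothetical miniCQE format selector (mirrors the idea of cqe_comp_fmt). */
enum mini_cqe_fmt {
	MINI_CQE_FMT_HASH = 0,   /* default, no extra FW capability needed */
	MINI_CQE_FMT_FLOW_TAG,   /* requires mini_cqe_resp_flow_tag */
	MINI_CQE_FMT_L3_L4_HDR,  /* requires mini_cqe_resp_l3_l4_tag */
};

/* Return 1 if CQE compression may stay enabled with the requested format. */
static int
mini_cqe_fmt_supported(const struct fw_caps *caps, enum mini_cqe_fmt fmt)
{
	if (!caps->cqe_compression)
		return 0;
	if (fmt == MINI_CQE_FMT_FLOW_TAG && !caps->mini_cqe_resp_flow_tag)
		return 0;
	if (fmt == MINI_CQE_FMT_L3_L4_HDR && !caps->mini_cqe_resp_l3_l4_tag)
		return 0;
	return 1;
}

int
main(void)
{
	/* Old FW: compression works, but the new miniCQE formats are absent. */
	struct fw_caps caps = { .cqe_compression = 1 };
	int cqe_comp = mini_cqe_fmt_supported(&caps, MINI_CQE_FMT_FLOW_TAG);

	if (!cqe_comp)
		printf("Flow Tag miniCQE format isn't supported, "
		       "disabling Rx CQE compression\n");
	return 0;
}

In the driver itself the capability bits are filled in by
mlx5_devx_cmd_query_hca_attr() and the fallback only clears config->cqe_comp,
so the port still comes up, just without compression.
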
drivers/common/mlx5/mlx5_devx_cmds.c
drivers/common/mlx5/mlx5_devx_cmds.h
drivers/common/mlx5/mlx5_prm.h
drivers/net/mlx5/linux/mlx5_os.c

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index b075af9..cc70c79 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -744,6 +744,11 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
                                              log_compress_mmo_size);
        attr->log_max_mmo_decompress = MLX5_GET(cmd_hca_cap, hcattr,
                                                log_decompress_mmo_size);
+       attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
+       attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
+                                               mini_cqe_resp_flow_tag);
+       attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
+                                                mini_cqe_resp_l3_l4_tag);
        if (attr->qos.sup) {
                MLX5_SET(query_hca_cap_in, in, op_mod,
                         MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 3e2a0a2..9dcd917 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -126,6 +126,9 @@ struct mlx5_hca_attr {
        uint32_t regexp_num_of_engines;
        uint32_t log_max_ft_sampler_num:8;
        uint32_t geneve_tlv_opt;
+       uint32_t cqe_compression:1;
+       uint32_t mini_cqe_resp_flow_tag:1;
+       uint32_t mini_cqe_resp_l3_l4_tag:1;
        struct mlx5_hca_qos_attr qos;
        struct mlx5_hca_vdpa_attr vdpa;
        int log_max_qp_sz;
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 751dda2..de721aa 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1444,7 +1444,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8 max_geneve_tlv_options[0x8];
        u8 reserved_at_568[0x3];
        u8 max_geneve_tlv_option_data_len[0x5];
-       u8 reserved_at_570[0x4c];
+       u8 reserved_at_570[0x49];
+       u8 mini_cqe_resp_l3_l4_tag[0x1];
+       u8 mini_cqe_resp_flow_tag[0x1];
+       u8 enhanced_cqe_compression[0x1];
        u8 mini_cqe_resp_stride_index[0x1];
        u8 cqe_128_always[0x1];
        u8 cqe_compression_128[0x1];
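
A quick consistency check on the layout above: the reserved gap shrinks from
0x4c (76) bits to 0x49 (73) bits, and the three new 1-bit fields
(mini_cqe_resp_l3_l4_tag, mini_cqe_resp_flow_tag, enhanced_cqe_compression)
account for exactly the 3-bit difference, so the offsets of the fields that
follow (mini_cqe_resp_stride_index, cqe_128_always, cqe_compression_128) are
unchanged.
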
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 9b95b9f..2dc0797 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -676,7 +676,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        int err = 0;
        unsigned int hw_padding = 0;
        unsigned int mps;
-       unsigned int cqe_comp;
        unsigned int tunnel_en = 0;
        unsigned int mpls_en = 0;
        unsigned int swp = 0;
@@ -868,12 +867,8 @@ err_secondary:
                        mprq_caps.max_single_wqe_log_num_of_strides;
        }
 #endif
-       if (RTE_CACHE_LINE_SIZE == 128 &&
-           !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
-               cqe_comp = 0;
-       else
-               cqe_comp = 1;
-       config->cqe_comp = cqe_comp;
+       /* Rx CQE compression is enabled by default. */
+       config->cqe_comp = 1;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
                tunnel_en = ((dv_attr.tunnel_offloads_caps &
@@ -1104,10 +1099,6 @@ err_secondary:
                config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
                config->mps == MLX5_MPW ? "legacy " : "",
                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-       if (config->cqe_comp && !cqe_comp) {
-               DRV_LOG(WARNING, "Rx CQE compression isn't supported");
-               config->cqe_comp = 0;
-       }
        if (config->devx) {
                err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
                if (err) {
@@ -1206,6 +1197,25 @@ err_secondary:
                }
 #endif
        }
+       if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
+           !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+               DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
+               config->cqe_comp = 0;
+       }
+       if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+           (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
+               DRV_LOG(WARNING, "Flow Tag CQE compression"
+                                " format isn't supported.");
+               config->cqe_comp = 0;
+       }
+       if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+           (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
+               DRV_LOG(WARNING, "L3/L4 Header CQE compression"
+                                " format isn't supported.");
+               config->cqe_comp = 0;
+       }
+       DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
+                       config->cqe_comp ? "" : "not ");
        if (config->tx_pp) {
                DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
                        config->hca_attr.dev_freq_khz);