git.droids-corp.org - dpdk.git/commitdiff
common/mlx5: add minimum WQE size for striding RQ
author: Michael Baum <michaelba@nvidia.com>
Tue, 23 Nov 2021 18:38:03 +0000 (20:38 +0200)
committer: Raslan Darawsheh <rasland@nvidia.com>
Sun, 5 Dec 2021 11:22:05 +0000 (12:22 +0100)
Some devices have a WQE size limit for striding RQ. On some newer
devices, this limitation is smaller and information on its size is
provided by the firmware.

This patch adds the attribute query from firmware: the minimum required
size of WQE buffer for striding RQ in granularity of Bytes.

Cc: stable@dpdk.org
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/common/mlx5/mlx5_devx_cmds.c
drivers/common/mlx5/mlx5_devx_cmds.h
drivers/common/mlx5/mlx5_prm.h

index 7cd3d4fa9878280f4cde06e36462b5e3f6205cf2..2e807a08297d7030025980fab678ac1e33d83514 100644 (file)
@@ -823,6 +823,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 {
        uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
        uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
+       bool hca_cap_2_sup;
        uint64_t general_obj_types_supported = 0;
        void *hcattr;
        int rc, i;
@@ -832,6 +833,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
                        MLX5_HCA_CAP_OPMOD_GET_CUR);
        if (!hcattr)
                return rc;
+       hca_cap_2_sup = MLX5_GET(cmd_hca_cap, hcattr, hca_cap_2);
        attr->max_wqe_sz_sq = MLX5_GET(cmd_hca_cap, hcattr, max_wqe_sz_sq);
        attr->flow_counter_bulk_alloc_bitmap =
                        MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
@@ -967,6 +969,20 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
                                         general_obj_types) &
                              MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
        attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
+       if (hca_cap_2_sup) {
+               hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+                               MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
+                               MLX5_HCA_CAP_OPMOD_GET_CUR);
+               if (!hcattr) {
+                       DRV_LOG(DEBUG,
+                               "Failed to query DevX HCA capabilities 2.");
+                       return rc;
+               }
+               attr->log_min_stride_wqe_sz = MLX5_GET(cmd_hca_cap_2, hcattr,
+                                                      log_min_stride_wqe_sz);
+       }
+       if (attr->log_min_stride_wqe_sz == 0)
+               attr->log_min_stride_wqe_sz = MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
        if (attr->qos.sup) {
                hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
                                MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
index d7f71646a3032e9319598bb89dc3b74e58265bd8..37821b493ef5768bd3e4fb14d733af453b6daa04 100644 (file)
@@ -251,6 +251,7 @@ struct mlx5_hca_attr {
        uint32_t log_max_mmo_decompress:5;
        uint32_t umr_modify_entity_size_disabled:1;
        uint32_t umr_indirect_mkey_disabled:1;
+       uint32_t log_min_stride_wqe_sz:5;
        uint16_t max_wqe_sz_sq;
 };
 
index 982a53ffbe031da6ab9a987a61437e64b1f381f0..495b63191aba516fd41c939658f4c7dff5c76d68 100644 (file)
 /* The maximum log value of segments per RQ WQE. */
 #define MLX5_MAX_LOG_RQ_SEGS 5u
 
+/* Log 2 of the default size of a WQE for Multi-Packet RQ. */
+#define MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE 14U
+
 /* The alignment needed for WQ buffer. */
 #define MLX5_WQE_BUF_ALIGNMENT rte_mem_page_size()
 
@@ -1342,7 +1345,9 @@ enum {
 #define MLX5_STEERING_LOGIC_FORMAT_CONNECTX_6DX 0x1
 
 struct mlx5_ifc_cmd_hca_cap_bits {
-       u8 reserved_at_0[0x30];
+       u8 reserved_at_0[0x20];
+       u8 hca_cap_2[0x1];
+       u8 reserved_at_21[0xf];
        u8 vhca_id[0x10];
        u8 reserved_at_40[0x20];
        u8 reserved_at_60[0x3];
@@ -1909,7 +1914,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
        u8 max_reformat_insert_offset[0x8];
        u8 max_reformat_remove_size[0x8];
        u8 max_reformat_remove_offset[0x8]; /* End of DW6. */
-       u8 aso_conntrack_reg_id[0x8];
+       u8 reserved_at_c0[0x3];
+       u8 log_min_stride_wqe_sz[0x5];
        u8 reserved_at_c8[0x3];
        u8 log_conn_track_granularity[0x5];
        u8 reserved_at_d0[0x3];
@@ -1922,6 +1928,7 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 
 union mlx5_ifc_hca_cap_union_bits {
        struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+       struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
        struct mlx5_ifc_per_protocol_networking_offload_caps_bits
               per_protocol_networking_offload_caps;
        struct mlx5_ifc_qos_cap_bits qos_cap;