common/mlx5: add LAG context query
authorRongwei Liu <rongweil@nvidia.com>
Thu, 21 Oct 2021 08:56:35 +0000 (11:56 +0300)
committerRaslan Darawsheh <rasland@nvidia.com>
Thu, 21 Oct 2021 10:36:57 +0000 (12:36 +0200)
Added a new function mlx5_devx_cmd_query_lag() to query LAG
properties (state, affinity, mode, etc.) from firmware.

Signed-off-by: Jiawei Wang <jiaweiw@nvidia.com>
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/common/mlx5/mlx5_devx_cmds.c
drivers/common/mlx5/mlx5_devx_cmds.h
drivers/common/mlx5/mlx5_prm.h
drivers/common/mlx5/version.map

index 6538bce..fb7c8e9 100644 (file)
@@ -2800,3 +2800,43 @@ mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
        crypto_login_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        return crypto_login_obj;
 }
+
+/**
+ * Query the LAG context from firmware.
+ *
+ * Issues the QUERY_LAG general DevX command and copies the returned
+ * lag_context fields into the caller-provided structure.
+ *
+ * @param[in] ctx
+ *   Pointer to ibv_context, returned from mlx5dv_open_device.
+ * @param[out] lag_ctx
+ *   Pointer to struct mlx5_devx_lag_context, to be set by the routine.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_query_lag(void *ctx,
+                       struct mlx5_devx_lag_context *lag_ctx)
+{
+       uint32_t in[MLX5_ST_SZ_DW(query_lag_in)] = {0};
+       uint32_t out[MLX5_ST_SZ_DW(query_lag_out)] = {0};
+       void *lctx;
+       int rc;
+
+       MLX5_SET(query_lag_in, in, opcode, MLX5_CMD_OP_QUERY_LAG);
+       rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
+       if (rc)
+               goto error;
+       /* Point at the lag_context section of the firmware reply. */
+       lctx = MLX5_ADDR_OF(query_lag_out, out, context);
+       lag_ctx->fdb_selection_mode = MLX5_GET(lag_context, lctx,
+                                              fdb_selection_mode);
+       lag_ctx->port_select_mode = MLX5_GET(lag_context, lctx,
+                                              port_select_mode);
+       lag_ctx->lag_state = MLX5_GET(lag_context, lctx, lag_state);
+       lag_ctx->tx_remap_affinity_2 = MLX5_GET(lag_context, lctx,
+                                               tx_remap_affinity_2);
+       lag_ctx->tx_remap_affinity_1 = MLX5_GET(lag_context, lctx,
+                                               tx_remap_affinity_1);
+       return 0;
+error:
+       /* Keep the documented contract: failure is always non-positive. */
+       rc = (rc > 0) ? -rc : rc;
+       return rc;
+}
index 979bc94..80b5dca 100644 (file)
@@ -189,6 +189,15 @@ struct mlx5_hca_attr {
        uint32_t umr_indirect_mkey_disabled:1;
 };
 
+/* LAG context, filled in by mlx5_devx_cmd_query_lag(). */
+struct mlx5_devx_lag_context {
+       /* Mirrors lag_context.fdb_selection_mode from firmware. */
+       uint32_t fdb_selection_mode:1;
+       /* Port selection mode; values per enum mlx5_lag_mode_type. */
+       uint32_t port_select_mode:3;
+       /* LAG state as reported by firmware. */
+       uint32_t lag_state:3;
+       /* TX remap affinity values copied from lag_context. */
+       uint32_t tx_remap_affinity_1:4;
+       uint32_t tx_remap_affinity_2:4;
+};
+
 struct mlx5_devx_wq_attr {
        uint32_t wq_type:4;
        uint32_t wq_signature:1;
@@ -673,4 +682,8 @@ struct mlx5_devx_obj *
 mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
                                      struct mlx5_devx_crypto_login_attr *attr);
 
+__rte_internal
+int
+mlx5_devx_cmd_query_lag(void *ctx,
+                       struct mlx5_devx_lag_context *lag_ctx);
 #endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
index 54e62aa..eab80ea 100644 (file)
@@ -1048,6 +1048,7 @@ enum {
        MLX5_CMD_OP_DEALLOC_PD = 0x801,
        MLX5_CMD_OP_ACCESS_REGISTER = 0x805,
        MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+       MLX5_CMD_OP_QUERY_LAG = 0x842,
        MLX5_CMD_OP_CREATE_TIR = 0x900,
        MLX5_CMD_OP_MODIFY_TIR = 0x901,
        MLX5_CMD_OP_CREATE_SQ = 0X904,
@@ -1507,7 +1508,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8 uar_4k[0x1];
        u8 reserved_at_241[0x9];
        u8 uar_sz[0x6];
-       u8 reserved_at_250[0x8];
+       u8 port_selection_cap[0x1];
+       u8 reserved_at_251[0x7];
        u8 log_pg_sz[0x8];
        u8 bf[0x1];
        u8 driver_version[0x1];
@@ -1974,6 +1976,14 @@ struct mlx5_ifc_query_nic_vport_context_in_bits {
        u8 reserved_at_68[0x18];
 };
 
+/*
+ * lag_tx_port_affinity: 0 means auto-selection, 1 means PF1, 2 means PF2,
+ * and so on.
+ * Each TIS binds to one PF by setting lag_tx_port_affinity (> 0).
+ * Once LAG is enabled, multiple TISs are created and each is bound to a
+ * different PF: TIS[i] gets affinity i+1, so its traffic egresses PF i+1.
+ */
/* Map TIS index to a 1-based TX port affinity; 0 when no ports (auto). */
#define MLX5_IFC_LAG_MAP_TIS_AFFINITY(index, num) \
	((num) != 0 ? ((index) % (num)) + 1 : 0)
 struct mlx5_ifc_tisc_bits {
        u8 strict_lag_tx_port_affinity[0x1];
        u8 reserved_at_1[0x3];
@@ -2007,6 +2017,39 @@ struct mlx5_ifc_query_tis_in_bits {
        u8 reserved_at_60[0x20];
 };
 
+/* port_select_mode definition. */
+enum mlx5_lag_mode_type {
+       /* TX port chosen via per-TIS lag_tx_port_affinity. */
+       MLX5_LAG_MODE_TIS = 0,
+       /* NOTE(review): presumably hash-based port selection - confirm vs PRM. */
+       MLX5_LAG_MODE_HASH = 1,
+};
+
+/* LAG context layout; field widths sum to 0x40 bits (two dwords). */
+struct mlx5_ifc_lag_context_bits {
+       u8 fdb_selection_mode[0x1];
+       u8 reserved_at_1[0x14];
+       u8 port_select_mode[0x3];
+       u8 reserved_at_18[0x5];
+       u8 lag_state[0x3];
+       u8 reserved_at_20[0x14];
+       u8 tx_remap_affinity_2[0x4];
+       u8 reserved_at_38[0x4];
+       u8 tx_remap_affinity_1[0x4];
+};
+
+/* QUERY_LAG input: general command header only, no extra payload. */
+struct mlx5_ifc_query_lag_in_bits {
+       u8 opcode[0x10];
+       u8 uid[0x10];
+       u8 reserved_at_20[0x10];
+       u8 op_mod[0x10];
+       u8 reserved_at_40[0x40];
+};
+
+/* QUERY_LAG output: status/syndrome header followed by the LAG context. */
+struct mlx5_ifc_query_lag_out_bits {
+       u8 status[0x8];
+       u8 reserved_at_8[0x18];
+       u8 syndrome[0x20];
+       struct mlx5_ifc_lag_context_bits context;
+};
+
 struct mlx5_ifc_alloc_transport_domain_out_bits {
        u8 status[0x8];
        u8 reserved_at_8[0x18];
index 7c95172..0ea8325 100644 (file)
@@ -52,6 +52,7 @@ INTERNAL {
        mlx5_devx_cmd_modify_virtq;
        mlx5_devx_cmd_qp_query_tis_td;
        mlx5_devx_cmd_query_hca_attr;
+       mlx5_devx_cmd_query_lag;
        mlx5_devx_cmd_query_parse_samples;
        mlx5_devx_cmd_query_virtio_q_counters; # WINDOWS_NO_EXPORT
        mlx5_devx_cmd_query_virtq;