diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 8c42380537..a805363757 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -39,34 +39,92 @@
 /* Invalidate a CQE. */
 #define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
 
-/* Maximum number of packets a multi-packet WQE can handle. */
-#define MLX5_MPW_DSEG_MAX 5
-
-/* WQE DWORD size */
-#define MLX5_WQE_DWORD_SIZE 16
-
-/* WQE size */
-#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
-
-/* Max size of a WQE session. */
-#define MLX5_WQE_SIZE_MAX 960U
-
-/* Compute the number of DS. */
-#define MLX5_WQE_DS(n) \
-	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
-
-/* Room for inline data in multi-packet WQE. */
-#define MLX5_MWQE64_INL_DATA 28
-
-/* Default minimum number of Tx queues for inlining packets. */
-#define MLX5_EMPW_MIN_TXQS 8
-
-/* Default max packet length to be inlined. */
-#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE)
-
+/* WQE Segment sizes in bytes. */
+#define MLX5_WSEG_SIZE 16u
+#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
+#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
+#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)
+
+/* WQE/WQEBB size in bytes. */
+#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)
+
+/*
+ * Max size of a WQE session.
+ * The absolute maximum size is 63 (MLX5_DSEG_MAX) segments;
+ * the WQE size field in the Control Segment is 6 bits wide.
+ */
+#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)
+
+/*
+ * Default minimum number of Tx queues for inlining packets.
+ * If there are fewer queues than specified, we assume there
+ * are not enough CPU resources (cycles) to perform inlining;
+ * the PCIe throughput is not assumed to be the bottleneck,
+ * so inlining is disabled.
+ */
+#define MLX5_INLINE_MAX_TXQS 8u
+#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u
+
+/*
+ * Default packet length threshold to be inlined with
+ * enhanced MPW. If the packet length exceeds the threshold
+ * the data are not inlined. Should be aligned to the WQEBB
+ * boundary, accounting for the title Control and Ethernet
+ * segments.
+ */
+#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \
+				  MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Maximal inline data length sent with enhanced MPW,
+ * based on the maximal WQE size.
+ */
+#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+				  MLX5_WQE_CSEG_SIZE - \
+				  MLX5_WQE_ESEG_SIZE - \
+				  MLX5_WQE_DSEG_SIZE + \
+				  MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Minimal number of packets to be sent with EMPW.
+ * This limits the minimal required size of a sent EMPW.
+ * If there are not enough resources to build a minimal
+ * EMPW, the sending loop exits.
+ */
+#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
+/*
+ * Maximal number of packets to be sent with EMPW.
+ * This value should not exceed MLX5_TX_COMP_THRESH,
+ * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs
+ * without a CQE generation request; multiplied by
+ * MLX5_TX_COMP_MAX_CQE this may cause significant latency
+ * in the Tx burst routine when freeing multiple mbufs.
+ */
+#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
+#define MLX5_MPW_MAX_PACKETS 6
+#define MLX5_MPW_INLINE_MAX_PACKETS 2
+
+/*
+ * Default packet length threshold to be inlined with
+ * ordinary SEND. Inlining saves the MR key search
+ * and an extra PCIe data fetch transaction, but
+ * consumes CPU cycles.
+ */
+#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
+				  MLX5_ESEG_MIN_INLINE_SIZE - \
+				  MLX5_WQE_CSEG_SIZE - \
+				  MLX5_WQE_ESEG_SIZE - \
+				  MLX5_WQE_DSEG_SIZE)
+/*
+ * Maximal inline data length sent with ordinary SEND,
+ * based on the maximal WQE size.
+ */
+#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+				  MLX5_WQE_CSEG_SIZE - \
+				  MLX5_WQE_ESEG_SIZE - \
+				  MLX5_WQE_DSEG_SIZE + \
+				  MLX5_ESEG_MIN_INLINE_SIZE)
 
-#define MLX5_OPC_MOD_ENHANCED_MPSW 0
-#define MLX5_OPCODE_ENHANCED_MPSW 0x29
+/* Missing in mlx5dv.h, so define it here. */
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29u
 
 /* CQE value to inform that VLAN is stripped. */
 #define MLX5_CQE_VLAN_STRIPPED (1u << 0)
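To make the threshold arithmetic above concrete, here is a standalone sanity check (C11), assuming the three WQE segment structs defined later in this patch are 16 bytes each and struct mlx5_wqe is one 64-byte WQEBB. The short names are local stand-ins introduced only for this sketch, not the driver's macros.

	/* Local stand-ins for the macro values above. */
	#define WSEG 16u		/* MLX5_WSEG_SIZE */
	#define WQE 64u			/* sizeof(struct mlx5_wqe) */
	#define DSEG_MIN_INL 12u	/* MLX5_DSEG_MIN_INLINE_SIZE */
	#define ESEG_MIN_INL 18u	/* MLX5_ESEG_MIN_INLINE_SIZE */
	#define WQE_MAX (60u * WSEG)	/* MLX5_WQE_SIZE_MAX = 960 */

	/* Expected byte values of the inline thresholds. */
	_Static_assert(4u * WQE + DSEG_MIN_INL == 268, "EMPW default inline");
	_Static_assert(WQE_MAX - 3u * WSEG + DSEG_MIN_INL == 924, "EMPW max inline");
	_Static_assert(5u * WQE + ESEG_MIN_INL - 3u * WSEG == 290, "SEND default inline");
	_Static_assert(WQE_MAX - 3u * WSEG + ESEG_MIN_INL == 930, "SEND max inline");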
@@ -104,6 +162,21 @@
 /* Tunnel packet bit in the CQE. */
 #define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
 
+/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */
+#define MLX5_CQE_LRO_PUSH_MASK 0x40
+
+/* Mask for L4 type in the CQE hdr_type_etc field. */
+#define MLX5_CQE_L4_TYPE_MASK 0x70
+
+/* The bit index of L4 type in the CQE hdr_type_etc field. */
+#define MLX5_CQE_L4_TYPE_SHIFT 0x4
+
+/* L4 type to indicate TCP packet without acknowledgment. */
+#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3
+
+/* L4 type to indicate TCP packet with acknowledgment. */
+#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4
+
 /* Inner L3 checksum offload (Tunneled packets only). */
 #define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
 
@@ -134,6 +207,12 @@
 /* Inner L3 type is IPV6. */
 #define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
 
+/* VLAN insertion flag. */
+#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)
+
+/* Data inline segment flag. */
+#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
+
 /* Is flow mark valid. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
@@ -150,49 +229,37 @@
 /* Default mark value used when none is provided. */
 #define MLX5_FLOW_MARK_DEFAULT 0xffffff
 
-/* Maximum number of DS in WQE. */
+/* Default mark mask for metadata legacy mode. */
+#define MLX5_FLOW_MARK_MASK 0xffffff
+
+/* Maximum number of DS in WQE. Limited by the 6-bit field. */
 #define MLX5_DSEG_MAX 63
 
-/* Subset of struct mlx5_wqe_eth_seg. */
-struct mlx5_wqe_eth_seg_small {
-	uint32_t rsvd0;
-	uint8_t cs_flags;
-	uint8_t rsvd1;
-	uint16_t mss;
-	uint32_t flow_table_metadata;
-	uint16_t inline_hdr_sz;
-	uint8_t inline_hdr[2];
-} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+/* The completion mode offset in the WQE control segment line 2. */
+#define MLX5_COMP_MODE_OFFSET 2
 
-struct mlx5_wqe_inl_small {
-	uint32_t byte_cnt;
-	uint8_t raw;
-} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+/* Amount of data bytes in minimal inline data segment. */
+#define MLX5_DSEG_MIN_INLINE_SIZE 12u
 
-struct mlx5_wqe_ctrl {
-	uint32_t ctrl0;
-	uint32_t ctrl1;
-	uint32_t ctrl2;
-	uint32_t ctrl3;
-} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+/* Amount of data bytes in minimal inline eth segment. */
+#define MLX5_ESEG_MIN_INLINE_SIZE 18u
 
-/* Small common part of the WQE. */
-struct mlx5_wqe {
-	uint32_t ctrl[4];
-	struct mlx5_wqe_eth_seg_small eseg;
-};
+/* Amount of data bytes after eth data segment. */
+#define MLX5_ESEG_EXTRA_DATA_SIZE 32u
 
-/* Vectorize WQE header. */
-struct mlx5_wqe_v {
-	rte_v128u32_t ctrl;
-	rte_v128u32_t eseg;
-};
+/* The maximum log value of segments per RQ WQE. */
+#define MLX5_MAX_LOG_RQ_SEGS 5u
 
-/* WQE. */
-struct mlx5_wqe64 {
-	struct mlx5_wqe hdr;
-	uint8_t raw[32];
-} __rte_aligned(MLX5_WQE_SIZE);
+/* The alignment needed for WQ buffer. */
+#define MLX5_WQE_BUF_ALIGNMENT 512
+
+/* Completion mode. */
+enum mlx5_completion_mode {
+	MLX5_COMP_ONLY_ERR = 0x0,
+	MLX5_COMP_ONLY_FIRST_ERR = 0x1,
+	MLX5_COMP_ALWAYS = 0x2,
+	MLX5_COMP_CQE_AND_EQE = 0x3,
+};
 
 /* MPW mode. */
 enum mlx5_mpw_mode {
@@ -201,26 +268,62 @@ enum mlx5_mpw_mode {
 	MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
 };
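The LRO/L4 masks added above apply to CQE fields. A minimal sketch of how they combine, assuming hdr_type_etc has already been converted from big-endian to CPU order (the conversion itself is omitted here):

	#include <stdbool.h>
	#include <stdint.h>

	/* 'hdr_type_etc' must already be in CPU byte order. */
	static inline bool
	cqe_is_tcp_empty_ack(uint16_t hdr_type_etc)
	{
		unsigned int l4_type = (hdr_type_etc & MLX5_CQE_L4_TYPE_MASK) >>
				       MLX5_CQE_L4_TYPE_SHIFT;

		return l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK;
	}

	/* The PSH flag of an LRO session, from lro_tcppsh_abort_dupack. */
	static inline bool
	cqe_lro_has_push(uint8_t lro_tcppsh_abort_dupack)
	{
		return (lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK) != 0;
	}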
 
-/* MPW session status. */
-enum mlx5_mpw_state {
-	MLX5_MPW_STATE_OPENED,
-	MLX5_MPW_INL_STATE_OPENED,
-	MLX5_MPW_ENHANCED_STATE_OPENED,
-	MLX5_MPW_STATE_CLOSED,
-};
+/* WQE Control segment. */
+struct mlx5_wqe_cseg {
+	uint32_t opcode;
+	uint32_t sq_ds;
+	uint32_t flags;
+	uint32_t misc;
+} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
+
+/* Header of data segment; also the minimal-size Data Segment. */
+struct mlx5_wqe_dseg {
+	uint32_t bcount;
+	union {
+		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+		struct {
+			uint32_t lkey;
+			uint64_t pbuf;
+		} __rte_packed;
+	};
+} __rte_packed;
 
-/* MPW session descriptor. */
-struct mlx5_mpw {
-	enum mlx5_mpw_state state;
-	unsigned int pkts_n;
-	unsigned int len;
-	unsigned int total_len;
-	volatile struct mlx5_wqe *wqe;
+/* Subset of struct WQE Ethernet Segment. */
+struct mlx5_wqe_eseg {
 	union {
-		volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX];
-		volatile uint8_t *raw;
-	} data;
-};
+		struct {
+			uint32_t swp_offs;
+			uint8_t cs_flags;
+			uint8_t swp_flags;
+			uint16_t mss;
+			uint32_t metadata;
+			uint16_t inline_hdr_sz;
+			union {
+				uint16_t inline_data;
+				uint16_t vlan_tag;
+			};
+		} __rte_packed;
+		struct {
+			uint32_t offsets;
+			uint32_t flags;
+			uint32_t flow_metadata;
+			uint32_t inline_hdr;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+/* The title WQEBB, header of WQE. */
+struct mlx5_wqe {
+	union {
+		struct mlx5_wqe_cseg cseg;
+		uint32_t ctrl[4];
+	};
+	struct mlx5_wqe_eseg eseg;
+	union {
+		struct mlx5_wqe_dseg dseg[2];
+		uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
+	};
+} __rte_packed;
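A sketch of how the title WQEBB's Control Segment is typically initialized for a SEND, under these assumptions: the SQ number is pre-shifted by 8 bits as the PRM expects, MLX5_OPCODE_SEND comes from mlx5dv.h, and 'ds' counts 16-byte WQE segments. A real implementation also fills the Ethernet and data segments; this is illustrative only.

	#include <stdint.h>
	#include <rte_byteorder.h>

	static inline void
	wqe_cseg_init(struct mlx5_wqe_cseg *cs, uint32_t sq_num_8s,
		      uint16_t wqe_index, uint32_t ds)
	{
		cs->opcode = rte_cpu_to_be_32(((uint32_t)wqe_index << 8) |
					      MLX5_OPCODE_SEND);
		cs->sq_ds = rte_cpu_to_be_32(sq_num_8s | ds);
		/* Completion mode lives at bits 2-3 of the flags dword. */
		cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
				     MLX5_COMP_MODE_OFFSET);
		cs->misc = RTE_BE32(0);
	}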
 
 /* WQE for Multi-Packet RQ. */
 struct mlx5_wqe_mprq {
@@ -245,18 +348,26 @@ struct mlx5_cqe {
 	uint8_t pkt_info;
 	uint8_t rsvd0;
 	uint16_t wqe_id;
-	uint8_t rsvd3[8];
+	uint8_t lro_tcppsh_abort_dupack;
+	uint8_t lro_min_ttl;
+	uint16_t lro_tcp_win;
+	uint32_t lro_ack_seq_num;
 	uint32_t rx_hash_res;
 	uint8_t rx_hash_type;
-	uint8_t rsvd1[11];
+	uint8_t rsvd1[3];
+	uint16_t csum;
+	uint8_t rsvd2[6];
 	uint16_t hdr_type_etc;
 	uint16_t vlan_info;
-	uint8_t rsvd2[12];
+	uint8_t lro_num_seg;
+	uint8_t rsvd3[3];
+	uint32_t flow_table_metadata;
+	uint8_t rsvd4[4];
 	uint32_t byte_cnt;
 	uint64_t timestamp;
 	uint32_t sop_drop_qpn;
 	uint16_t wqe_counter;
-	uint8_t rsvd4;
+	uint8_t rsvd5;
 	uint8_t op_own;
 };
 
@@ -280,14 +391,16 @@
 /* CQE format value. */
 #define MLX5_COMPRESSED 0x3
 
-/* Write a specific data value to a field. */
-#define MLX5_MODIFICATION_TYPE_SET 1
-
-/* Add a specific data value to a field. */
-#define MLX5_MODIFICATION_TYPE_ADD 2
+/* Action types of header modification. */
+enum {
+	MLX5_MODIFICATION_TYPE_SET = 0x1,
+	MLX5_MODIFICATION_TYPE_ADD = 0x2,
+	MLX5_MODIFICATION_TYPE_COPY = 0x3,
+};
 
 /* The field of packet to be modified. */
 enum mlx5_modification_field {
+	MLX5_MODI_OUT_NONE = -1,
 	MLX5_MODI_OUT_SMAC_47_16 = 1,
 	MLX5_MODI_OUT_SMAC_15_0,
 	MLX5_MODI_OUT_ETHERTYPE,
@@ -310,6 +423,7 @@ enum mlx5_modification_field {
 	MLX5_MODI_OUT_DIPV6_31_0,
 	MLX5_MODI_OUT_SIPV4,
 	MLX5_MODI_OUT_DIPV4,
+	MLX5_MODI_OUT_FIRST_VID,
 	MLX5_MODI_IN_SMAC_47_16 = 0x31,
 	MLX5_MODI_IN_SMAC_15_0,
 	MLX5_MODI_IN_ETHERTYPE,
@@ -336,6 +450,35 @@ enum mlx5_modification_field {
 	MLX5_MODI_IN_IPV6_HOPLIMIT,
 	MLX5_MODI_META_DATA_REG_A,
 	MLX5_MODI_META_DATA_REG_B = 0x50,
+	MLX5_MODI_META_REG_C_0,
+	MLX5_MODI_META_REG_C_1,
+	MLX5_MODI_META_REG_C_2,
+	MLX5_MODI_META_REG_C_3,
+	MLX5_MODI_META_REG_C_4,
+	MLX5_MODI_META_REG_C_5,
+	MLX5_MODI_META_REG_C_6,
+	MLX5_MODI_META_REG_C_7,
+	MLX5_MODI_OUT_TCP_SEQ_NUM,
+	MLX5_MODI_IN_TCP_SEQ_NUM,
+	MLX5_MODI_OUT_TCP_ACK_NUM,
+	MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+};
+
+/* Total number of metadata reg_c's. */
+#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)
+
+enum modify_reg {
+	REG_NONE = 0,
+	REG_A,
+	REG_B,
+	REG_C_0,
+	REG_C_1,
+	REG_C_2,
+	REG_C_3,
+	REG_C_4,
+	REG_C_5,
+	REG_C_6,
+	REG_C_7,
 };
 
 /* Modification sub command. */
@@ -354,6 +497,13 @@ struct mlx5_modification_cmd {
 	union {
 		uint32_t data1;
 		uint8_t data[4];
+		struct {
+			unsigned int rsvd2:8;
+			unsigned int dst_offset:5;
+			unsigned int rsvd3:3;
+			unsigned int dst_field:12;
+			unsigned int rsvd4:4;
+		};
 	};
 };
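A sketch of filling one header-modify command, e.g. to overwrite the outer IPv4 source address. It assumes the first dword of struct mlx5_modification_cmd (not shown in this hunk) carries action_type/field/length bitfields, as in the full definition; the device expects both dwords in big-endian, and error handling is omitted.

	#include <rte_byteorder.h>

	static inline void
	modify_cmd_set_sipv4(struct mlx5_modification_cmd *cmd, rte_be32_t ip)
	{
		cmd->action_type = MLX5_MODIFICATION_TYPE_SET;
		cmd->field = MLX5_MODI_OUT_SIPV4;
		cmd->length = 32;			/* full-dword write */
		cmd->data0 = rte_cpu_to_be_32(cmd->data0);
		cmd->data1 = ip;			/* value to be written */
	}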
 
@@ -378,7 +528,6 @@ typedef uint8_t u8;
 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
-#define MLX5_ST_SZ_DB(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
@@ -393,6 +542,14 @@
 	 (((_v) & __mlx5_mask(typ, fld)) << \
 	   __mlx5_dw_bit_off(typ, fld))); \
 	} while (0)
+
+#define MLX5_SET64(typ, p, fld, v) \
+	do { \
+		assert(__mlx5_bit_sz(typ, fld) == 64); \
+		*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
+			rte_cpu_to_be_64(v); \
+	} while (0)
+
 #define MLX5_GET(typ, p, fld) \
 	((rte_be_to_cpu_32(*((__be32 *)(p) +\
 	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
@@ -406,7 +563,11 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 
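Usage sketch for the accessor macros: a command buffer is treated as a big-endian bit stream described by an mlx5_ifc_*_bits layout. The example builds a QUERY_HCA_CAP request using layouts defined further down in this patch; the 'capability' member of the output layout and the DevX execution call are assumptions here, and error handling is omitted.

	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);
	/* ... execute via mlx5dv_devx_general_cmd() or similar ... */
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	unsigned int log_max_sq = MLX5_GET(cmd_hca_cap, hcattr, log_max_sq);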
 struct mlx5_ifc_fte_match_set_misc_bits {
-	u8 reserved_at_0[0x8];
+	u8 gre_c_present[0x1];
+	u8 reserved_at_1[0x1];
+	u8 gre_k_present[0x1];
+	u8 gre_s_present[0x1];
+	u8 source_vhci_port[0x4];
 	u8 source_sqn[0x18];
 	u8 reserved_at_20[0x10];
 	u8 source_port[0x10];
@@ -426,12 +587,17 @@ struct mlx5_ifc_fte_match_set_misc_bits {
 	u8 gre_key_l[0x8];
 	u8 vxlan_vni[0x18];
 	u8 reserved_at_b8[0x8];
-	u8 reserved_at_c0[0x20];
+	u8 geneve_vni[0x18];
+	u8 reserved_at_e4[0x7];
+	u8 geneve_oam[0x1];
 	u8 reserved_at_e0[0xc];
 	u8 outer_ipv6_flow_label[0x14];
 	u8 reserved_at_100[0xc];
 	u8 inner_ipv6_flow_label[0x14];
-	u8 reserved_at_120[0xe0];
+	u8 reserved_at_120[0xa];
+	u8 geneve_opt_len[0x6];
+	u8 geneve_protocol_type[0x10];
+	u8 reserved_at_140[0xc0];
 };
 
 struct mlx5_ifc_ipv4_layout_bits {
@@ -487,9 +653,17 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
 	struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
 	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
 	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
-	u8 reserved_at_80[0x100];
+	u8 metadata_reg_c_7[0x20];
+	u8 metadata_reg_c_6[0x20];
+	u8 metadata_reg_c_5[0x20];
+	u8 metadata_reg_c_4[0x20];
+	u8 metadata_reg_c_3[0x20];
+	u8 metadata_reg_c_2[0x20];
+	u8 metadata_reg_c_1[0x20];
+	u8 metadata_reg_c_0[0x20];
 	u8 metadata_reg_a[0x20];
-	u8 reserved_at_1a0[0x60];
+	u8 metadata_reg_b[0x20];
+	u8 reserved_at_1c0[0x40];
 };
 
 struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -530,10 +704,25 @@ enum {
 
 enum {
 	MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+	MLX5_CMD_OP_CREATE_MKEY = 0x200,
+	MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+	MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+	MLX5_CMD_OP_CREATE_TIR = 0x900,
+	MLX5_CMD_OP_CREATE_SQ = 0x904,
+	MLX5_CMD_OP_MODIFY_SQ = 0x905,
+	MLX5_CMD_OP_CREATE_RQ = 0x908,
+	MLX5_CMD_OP_MODIFY_RQ = 0x909,
+	MLX5_CMD_OP_CREATE_TIS = 0x912,
+	MLX5_CMD_OP_QUERY_TIS = 0x915,
+	MLX5_CMD_OP_CREATE_RQT = 0x916,
 	MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
 	MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
 };
 
+enum {
+	MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+};
+
 /* Flow counters. */
 struct mlx5_ifc_alloc_flow_counter_out_bits {
 	u8 status[0x8];
@@ -548,7 +737,9 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
 	u8 reserved_at_10[0x10];
 	u8 reserved_at_20[0x10];
 	u8 op_mod[0x10];
-	u8 reserved_at_40[0x40];
+	u8 flow_counter_id[0x20];
+	u8 reserved_at_40[0x18];
+	u8 flow_counter_bulk[0x8];
 };
 
 struct mlx5_ifc_dealloc_flow_counter_out_bits {
@@ -585,16 +776,106 @@ struct mlx5_ifc_query_flow_counter_in_bits {
 	u8 reserved_at_10[0x10];
 	u8 reserved_at_20[0x10];
 	u8 op_mod[0x10];
-	u8 reserved_at_40[0x80];
+	u8 reserved_at_40[0x20];
+	u8 mkey[0x20];
+	u8 address[0x40];
 	u8 clear[0x1];
-	u8 reserved_at_c1[0xf];
-	u8 num_of_counters[0x10];
+	u8 dump_to_memory[0x1];
+	u8 num_of_counters[0x1e];
 	u8 flow_counter_id[0x20];
 };
 
+struct mlx5_ifc_mkc_bits {
+	u8 reserved_at_0[0x1];
+	u8 free[0x1];
+	u8 reserved_at_2[0x1];
+	u8 access_mode_4_2[0x3];
+	u8 reserved_at_6[0x7];
+	u8 relaxed_ordering_write[0x1];
+	u8 reserved_at_e[0x1];
+	u8 small_fence_on_rdma_read_response[0x1];
+	u8 umr_en[0x1];
+	u8 a[0x1];
+	u8 rw[0x1];
+	u8 rr[0x1];
+	u8 lw[0x1];
+	u8 lr[0x1];
+	u8 access_mode_1_0[0x2];
+	u8 reserved_at_18[0x8];
+
+	u8 qpn[0x18];
+	u8 mkey_7_0[0x8];
+
+	u8 reserved_at_40[0x20];
+
+	u8 length64[0x1];
+	u8 bsf_en[0x1];
+	u8 sync_umr[0x1];
+	u8 reserved_at_63[0x2];
+	u8 expected_sigerr_count[0x1];
+	u8 reserved_at_66[0x1];
+	u8 en_rinval[0x1];
+	u8 pd[0x18];
+
+	u8 start_addr[0x40];
+
+	u8 len[0x40];
+
+	u8 bsf_octword_size[0x20];
+
+	u8 reserved_at_120[0x80];
+
+	u8 translations_octword_size[0x20];
+
+	u8 reserved_at_1c0[0x1b];
+	u8 log_page_size[0x5];
+
+	u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+
+	u8 syndrome[0x20];
+
+	u8 reserved_at_40[0x8];
+	u8 mkey_index[0x18];
+
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+
+	u8 reserved_at_40[0x20];
+
+	u8 pg_access[0x1];
+	u8 reserved_at_61[0x1f];
+
+	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+	u8 reserved_at_280[0x80];
+
+	u8 translations_octword_actual_size[0x20];
+
+	u8 mkey_umem_id[0x20];
+
+	u8 mkey_umem_offset[0x40];
+
+	u8 reserved_at_380[0x500];
+
+	u8 klm_pas_mtt[][0x20];
+};
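With the dump_to_memory/mkey/address fields above, one bulk query can make the device write many counters into a pre-registered buffer. A sketch of filling the input layout (the mkey creation via CREATE_MKEY and the command execution are omitted; base_id, n_counters, mkey_id and buf_addr are illustrative variables):

	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, n_counters);
	MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
	MLX5_SET(query_flow_counter_in, in, mkey, mkey_id);
	MLX5_SET64(query_flow_counter_in, in, address, (uint64_t)buf_addr);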
 
 enum {
 	MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,
-	MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
+	MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,
+	MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
 };
 
 enum {
@@ -602,6 +883,35 @@
 	MLX5_HCA_CAP_OPMOD_GET_CUR = 1,
 };
 
+enum {
+	MLX5_CAP_INLINE_MODE_L2,
+	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
+enum {
+	MLX5_INLINE_MODE_NONE,
+	MLX5_INLINE_MODE_L2,
+	MLX5_INLINE_MODE_IP,
+	MLX5_INLINE_MODE_TCP_UDP,
+	MLX5_INLINE_MODE_RESERVED4,
+	MLX5_INLINE_MODE_INNER_L2,
+	MLX5_INLINE_MODE_INNER_IP,
+	MLX5_INLINE_MODE_INNER_TCP_UDP,
+};
+
+/* HCA bit masks indicating which Flex parser protocols are already enabled. */
+#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
+#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
+#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
+#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
+#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
+#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
+#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
+#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_0[0x30];
 	u8 vhca_id[0x10];
@@ -790,7 +1100,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_343[0x5];
 	u8 log_max_flow_counter_bulk[0x8];
 	u8 max_flow_counter_15_0[0x10];
-	u8 reserved_at_360[0x3];
+	u8 modify_tis[0x1];
+	u8 flow_counters_dump[0x1];
+	u8 reserved_at_360[0x1];
 	u8 log_max_rq[0x5];
 	u8 reserved_at_368[0x3];
 	u8 log_max_sq[0x5];
@@ -897,8 +1209,48 @@ struct mlx5_ifc_qos_cap_bits {
 	u8 reserved_at_100[0x6e8];
 };
 
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+	u8 csum_cap[0x1];
+	u8 vlan_cap[0x1];
+	u8 lro_cap[0x1];
+	u8 lro_psh_flag[0x1];
+	u8 lro_time_stamp[0x1];
+	u8 lro_max_msg_sz_mode[0x2];
+	u8 wqe_vlan_insert[0x1];
+	u8 self_lb_en_modifiable[0x1];
+	u8 self_lb_mc[0x1];
+	u8 self_lb_uc[0x1];
+	u8 max_lso_cap[0x5];
+	u8 multi_pkt_send_wqe[0x2];
+	u8 wqe_inline_mode[0x2];
+	u8 rss_ind_tbl_cap[0x4];
+	u8 reg_umr_sq[0x1];
+	u8 scatter_fcs[0x1];
+	u8 enhanced_multi_pkt_send_wqe[0x1];
+	u8 tunnel_lso_const_out_ip_id[0x1];
+	u8 tunnel_lro_gre[0x1];
+	u8 tunnel_lro_vxlan[0x1];
+	u8 tunnel_stateless_gre[0x1];
+	u8 tunnel_stateless_vxlan[0x1];
+	u8 swp[0x1];
+	u8 swp_csum[0x1];
+	u8 swp_lso[0x1];
+	u8 reserved_at_23[0xd];
+	u8 max_vxlan_udp_ports[0x8];
+	u8 reserved_at_38[0x6];
+	u8 max_geneve_opt_len[0x1];
+	u8 tunnel_stateless_geneve_rx[0x1];
+	u8 reserved_at_40[0x10];
+	u8 lro_min_mss_size[0x10];
+	u8 reserved_at_60[0x120];
+	u8 lro_timer_supported_periods[4][0x20];
+	u8 reserved_at_200[0x600];
+};
+
 union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+	struct mlx5_ifc_per_protocol_networking_offload_caps_bits
+	       per_protocol_networking_offload_caps;
 	struct mlx5_ifc_qos_cap_bits qos_cap;
 	u8 reserved_at_0[0x8000];
 };
@@ -919,6 +1271,515 @@ struct mlx5_ifc_query_hca_cap_in_bits {
 	u8 reserved_at_40[0x40];
 };
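Reading the Tx inline requirement from the Ethernet offload capabilities: a sketch assuming 'hcattr' points at the capability payload of a QUERY_HCA_CAP executed with MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS above.

	unsigned int inline_mode =
		MLX5_GET(per_protocol_networking_offload_caps, hcattr,
			 wqe_inline_mode);

	if (inline_mode == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) {
		/*
		 * The real minimum is per vport: fetch min_wqe_inline_mode
		 * with QUERY_NIC_VPORT_CONTEXT (layouts defined below).
		 */
	}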
+struct mlx5_ifc_mac_address_layout_bits {
+	u8 reserved_at_0[0x10];
+	u8 mac_addr_47_32[0x10];
+	u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+	u8 reserved_at_0[0x5];
+	u8 min_wqe_inline_mode[0x3];
+	u8 reserved_at_8[0x15];
+	u8 disable_mc_local_lb[0x1];
+	u8 disable_uc_local_lb[0x1];
+	u8 roce_en[0x1];
+	u8 arm_change_event[0x1];
+	u8 reserved_at_21[0x1a];
+	u8 event_on_mtu[0x1];
+	u8 event_on_promisc_change[0x1];
+	u8 event_on_vlan_change[0x1];
+	u8 event_on_mc_address_change[0x1];
+	u8 event_on_uc_address_change[0x1];
+	u8 reserved_at_40[0xc];
+	u8 affiliation_criteria[0x4];
+	u8 affiliated_vhca_id[0x10];
+	u8 reserved_at_60[0xd0];
+	u8 mtu[0x10];
+	u8 system_image_guid[0x40];
+	u8 port_guid[0x40];
+	u8 node_guid[0x40];
+	u8 reserved_at_200[0x140];
+	u8 qkey_violation_counter[0x10];
+	u8 reserved_at_350[0x430];
+	u8 promisc_uc[0x1];
+	u8 promisc_mc[0x1];
+	u8 promisc_all[0x1];
+	u8 reserved_at_783[0x2];
+	u8 allowed_list_type[0x3];
+	u8 reserved_at_788[0xc];
+	u8 allowed_list_size[0xc];
+	struct mlx5_ifc_mac_address_layout_bits permanent_address;
+	u8 reserved_at_7e0[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x40];
+	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 other_vport[0x1];
+	u8 reserved_at_41[0xf];
+	u8 vport_number[0x10];
+	u8 reserved_at_60[0x5];
+	u8 allowed_list_type[0x3];
+	u8 reserved_at_68[0x18];
+};
+
+struct mlx5_ifc_tisc_bits {
+	u8 strict_lag_tx_port_affinity[0x1];
+	u8 reserved_at_1[0x3];
+	u8 lag_tx_port_affinity[0x04];
+	u8 reserved_at_8[0x4];
+	u8 prio[0x4];
+	u8 reserved_at_10[0x10];
+	u8 reserved_at_20[0x100];
+	u8 reserved_at_120[0x8];
+	u8 transport_domain[0x18];
+	u8 reserved_at_140[0x8];
+	u8 underlay_qpn[0x18];
+	u8 reserved_at_160[0x3a0];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x40];
+	struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x8];
+	u8 tisn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x8];
+	u8 transport_domain[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+	MLX5_WQ_TYPE_CYCLIC = 0x1,
+	MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+	MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
+};
+
+enum {
+	MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+	MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+	u8 wq_type[0x4];
+	u8 wq_signature[0x1];
+	u8 end_padding_mode[0x2];
+	u8 cd_slave[0x1];
+	u8 reserved_at_8[0x18];
+	u8 hds_skip_first_sge[0x1];
+	u8 log2_hds_buf_size[0x3];
+	u8 reserved_at_24[0x7];
+	u8 page_offset[0x5];
+	u8 lwm[0x10];
+	u8 reserved_at_40[0x8];
+	u8 pd[0x18];
+	u8 reserved_at_60[0x8];
+	u8 uar_page[0x18];
+	u8 dbr_addr[0x40];
+	u8 hw_counter[0x20];
+	u8 sw_counter[0x20];
+	u8 reserved_at_100[0xc];
+	u8 log_wq_stride[0x4];
+	u8 reserved_at_110[0x3];
+	u8 log_wq_pg_sz[0x5];
+	u8 reserved_at_118[0x3];
+	u8 log_wq_sz[0x5];
+	u8 dbr_umem_valid[0x1];
+	u8 wq_umem_valid[0x1];
+	u8 reserved_at_122[0x1];
+	u8 log_hairpin_num_packets[0x5];
+	u8 reserved_at_128[0x3];
+	u8 log_hairpin_data_sz[0x5];
+	u8 reserved_at_130[0x4];
+	u8 single_wqe_log_num_of_strides[0x4];
+	u8 two_byte_shift_en[0x1];
+	u8 reserved_at_139[0x4];
+	u8 single_stride_log_num_of_bytes[0x3];
+	u8 dbr_umem_id[0x20];
+	u8 wq_umem_id[0x20];
+	u8 wq_umem_offset[0x40];
+	u8 reserved_at_1c0[0x440];
+};
+
+enum {
+	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+	MLX5_RQC_STATE_RST = 0x0,
+	MLX5_RQC_STATE_RDY = 0x1,
+	MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+	u8 rlky[0x1];
+	u8 delay_drop_en[0x1];
+	u8 scatter_fcs[0x1];
+	u8 vsd[0x1];
+	u8 mem_rq_type[0x4];
+	u8 state[0x4];
+	u8 reserved_at_c[0x1];
+	u8 flush_in_error_en[0x1];
+	u8 hairpin[0x1];
+	u8 reserved_at_f[0x11];
+	u8 reserved_at_20[0x8];
+	u8 user_index[0x18];
+	u8 reserved_at_40[0x8];
+	u8 cqn[0x18];
+	u8 counter_set_id[0x8];
+	u8 reserved_at_68[0x18];
+	u8 reserved_at_80[0x8];
+	u8 rmpn[0x18];
+	u8 reserved_at_a0[0x8];
+	u8 hairpin_peer_sq[0x18];
+	u8 reserved_at_c0[0x10];
+	u8 hairpin_peer_vhca[0x10];
+	u8 reserved_at_e0[0xa0];
+	struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x8];
+	u8 rqn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0xc0];
+	struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x8];
+	u8 tisn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0xc0];
+	struct mlx5_ifc_tisc_bits ctx;
+};
+
+enum {
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 rq_state[0x4];
+	u8 reserved_at_44[0x4];
+	u8 rqn[0x18];
+	u8 reserved_at_60[0x20];
+	u8 modify_bitmask[0x40];
+	u8 reserved_at_c0[0x40];
+	struct mlx5_ifc_rqc_bits ctx;
+};
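RQ state transitions go through MODIFY_RQ: rq_state holds the current state, while the new state is written into the embedded RQ context. A sketch of the RST-to-RDY transition ('rqn' returned by CREATE_RQ; DevX execution and error handling omitted):

	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	void *rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RST); /* current */
	MLX5_SET(modify_rq_in, in, rqn, rqn);
	MLX5_SET(rqc, rq_ctx, state, MLX5_RQC_STATE_RDY);	/* target */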
+enum {
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+	u8 l3_prot_type[0x1];
+	u8 l4_prot_type[0x1];
+	u8 selected_fields[0x1e];
+};
+
+enum {
+	MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+	MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+	MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+	MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+	MLX5_RX_HASH_FN_NONE = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
+};
+
+enum {
+	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
+};
+
+enum {
+	MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 = 0x0,
+	MLX5_LRO_MAX_MSG_SIZE_START_FROM_L2 = 0x1,
+};
+
+struct mlx5_ifc_tirc_bits {
+	u8 reserved_at_0[0x20];
+	u8 disp_type[0x4];
+	u8 reserved_at_24[0x1c];
+	u8 reserved_at_40[0x40];
+	u8 reserved_at_80[0x4];
+	u8 lro_timeout_period_usecs[0x10];
+	u8 lro_enable_mask[0x4];
+	u8 lro_max_msg_sz[0x8];
+	u8 reserved_at_a0[0x40];
+	u8 reserved_at_e0[0x8];
+	u8 inline_rqn[0x18];
+	u8 rx_hash_symmetric[0x1];
+	u8 reserved_at_101[0x1];
+	u8 tunneled_offload_en[0x1];
+	u8 reserved_at_103[0x5];
+	u8 indirect_table[0x18];
+	u8 rx_hash_fn[0x4];
+	u8 reserved_at_124[0x2];
+	u8 self_lb_block[0x2];
+	u8 transport_domain[0x18];
+	u8 rx_hash_toeplitz_key[10][0x20];
+	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+	u8 reserved_at_2c0[0x4c0];
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x8];
+	u8 tirn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0xc0];
+	struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_rq_num_bits {
+	u8 reserved_at_0[0x8];
+	u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_rqtc_bits {
+	u8 reserved_at_0[0xa0];
+	u8 reserved_at_a0[0x10];
+	u8 rqt_max_size[0x10];
+	u8 reserved_at_c0[0x10];
+	u8 rqt_actual_size[0x10];
+	u8 reserved_at_e0[0x6a0];
+	struct mlx5_ifc_rq_num_bits rq_num[];
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x8];
+	u8 rqtn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+struct mlx5_ifc_create_rqt_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0xc0];
+	struct mlx5_ifc_rqtc_bits rqt_context;
+};
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+enum {
+	MLX5_SQC_STATE_RST = 0x0,
+	MLX5_SQC_STATE_RDY = 0x1,
+	MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+	u8 rlky[0x1];
+	u8 cd_master[0x1];
+	u8 fre[0x1];
+	u8 flush_in_error_en[0x1];
+	u8 allow_multi_pkt_send_wqe[0x1];
+	u8 min_wqe_inline_mode[0x3];
+	u8 state[0x4];
+	u8 reg_umr[0x1];
+	u8 allow_swp[0x1];
+	u8 hairpin[0x1];
+	u8 reserved_at_f[0x11];
+	u8 reserved_at_20[0x8];
+	u8 user_index[0x18];
+	u8 reserved_at_40[0x8];
+	u8 cqn[0x18];
+	u8 reserved_at_60[0x8];
+	u8 hairpin_peer_rq[0x18];
+	u8 reserved_at_80[0x10];
+	u8 hairpin_peer_vhca[0x10];
+	u8 reserved_at_a0[0x50];
+	u8 packet_pacing_rate_limit_index[0x10];
+	u8 tis_lst_sz[0x10];
+	u8 reserved_at_110[0x10];
+	u8 reserved_at_120[0x40];
+	u8 reserved_at_160[0x8];
+	u8 tis_num_0[0x18];
+	struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x8];
+	u8 sqn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 sq_state[0x4];
+	u8 reserved_at_44[0x4];
+	u8 sqn[0x18];
+	u8 reserved_at_60[0x20];
+	u8 modify_bitmask[0x40];
+	u8 reserved_at_c0[0x40];
+	struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x8];
+	u8 sqn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0xc0];
+	struct mlx5_ifc_sqc_bits ctx;
+};
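A sketch of enabling LRO in the TIR context before CREATE_TIR. The timeout must be one of the periods the device reports in lro_timer_supported_periods, and the max-message-size units follow the PRM; both values are passed in as illustrative variables here, and command execution is omitted.

	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, lro_max_sz);	/* PRM units */
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs, lro_timeout);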
+enum {
+	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),
+	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),
+	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),
+	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),
+	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+	u8 valid[0x1];			// 00h
+	u8 bucket_overflow[0x1];
+	u8 start_color[0x2];
+	u8 both_buckets_on_green[0x1];
+	u8 meter_mode[0x2];
+	u8 reserved_at_1[0x19];
+	u8 reserved_at_2[0x20];		// 04h
+	u8 reserved_at_3[0x3];
+	u8 cbs_exponent[0x5];		// 08h
+	u8 cbs_mantissa[0x8];
+	u8 reserved_at_4[0x3];
+	u8 cir_exponent[0x5];
+	u8 cir_mantissa[0x8];
+	u8 reserved_at_5[0x20];		// 0Ch
+	u8 reserved_at_6[0x3];
+	u8 ebs_exponent[0x5];		// 10h
+	u8 ebs_mantissa[0x8];
+	u8 reserved_at_7[0x3];
+	u8 eir_exponent[0x5];
+	u8 eir_mantissa[0x8];
+	u8 reserved_at_8[0x60];		// 14h-1Ch
+};
+
 /* CQE format mask. */
 #define MLX5E_CQE_FORMAT_MASK 0xc
@@ -942,6 +1803,19 @@ struct mlx5_mini_cqe8 {
 	uint32_t byte_cnt;
 };
 
+/* srTCM PRM flow meter colors. */
+enum {
+	MLX5_FLOW_COLOR_RED = 0,
+	MLX5_FLOW_COLOR_YELLOW,
+	MLX5_FLOW_COLOR_GREEN,
+	MLX5_FLOW_COLOR_UNDEFINED,
+};
+
+/* Maximum value of srTCM metering parameters. */
+#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))
+#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)
+#define MLX5_SRTCM_EBS_MAX 0
+
 /**
  * Convert a user mark to flow mark.
 *
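The srTCM burst and rate values in flow_meter_parameters above are encoded as an 8-bit mantissa scaled by a 5-bit power-of-two exponent, which is where the MLX5_SRTCM_CBS_MAX limit (0xFF * 2^0x1F) comes from. A small standalone sketch of the bytes-to-mantissa/exponent conversion (rounding down; names are local to this example):

	#include <stdint.h>

	static void
	cbs_to_mant_exp(uint64_t cbs_bytes, uint8_t *mant, uint8_t *exp)
	{
		uint8_t e = 0;

		if (cbs_bytes > (0xFFULL << 0x1F))	/* MLX5_SRTCM_CBS_MAX */
			cbs_bytes = 0xFFULL << 0x1F;
		while (cbs_bytes > 0xFF) {	/* fit mantissa into 8 bits */
			cbs_bytes >>= 1;
			e++;
		}
		*mant = (uint8_t)cbs_bytes;
		*exp = e;	/* at most 0x1F, fits the 5-bit field */
	}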