uint32_t tunnel_lro_vxlan:1;
uint32_t lro_max_msg_sz_mode:2;
uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
+ uint32_t flex_parser_protocols;
};
/* Flow list. */
attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
eth_net_offloads);
attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
+ attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
+ flex_parser_protocols);
if (!attr->eth_net_offloads)
return 0;
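/*
 * A minimal usage sketch, not the driver's actual code: it assumes the
 * DevX query helper mlx5_devx_cmd_query_hca_attr() that this hunk
 * extends and a caller-side struct mlx5_hca_attr; the offload toggle
 * below is a hypothetical placeholder.
 */
struct mlx5_hca_attr attr = { 0 };

if (mlx5_devx_cmd_query_hca_attr(ctx, &attr))
	return -rte_errno;
/* The new field is a bitmap of flex parser protocols enabled in FW. */
if (attr.flex_parser_protocols & MLX5_HCA_FLEX_VXLAN_GPE_ENABLED)
	enable_vxlan_gpe_offload(); /* hypothetical helper */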
MLX5_INLINE_MODE_INNER_TCP_UDP,
};
+/* HCA bit masks indicating which Flex parser protocols are already enabled. */
+#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
+#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
+#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
+#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
+#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
+#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
+#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
+#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
+
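/*
 * A hedged sketch of how these masks might gate flow-item validation;
 * the helper name and its is_ipv6 parameter are illustrative, not part
 * of the patch. It tests the bitmap queried into
 * attr->flex_parser_protocols above.
 */
static inline int
mlx5_flex_parser_icmp_enabled(const struct mlx5_hca_attr *attr,
			      int is_ipv6)
{
	uint32_t mask = is_ipv6 ? MLX5_HCA_FLEX_ICMPV6_ENABLED :
				  MLX5_HCA_FLEX_ICMP_ENABLED;

	return (attr->flex_parser_protocols & mask) != 0;
}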
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];