Currently, the PMD decides whether VXLAN/GRE/GENEVE tunneled TSO
support can be enabled by checking config->tunnel_en (a single bit)
and config->tso.
This is incorrect; the right way is to check the following
flags returned by the mlx5dv_query_device function:
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN - if supported, the
DEV_TX_OFFLOAD_VXLAN_TNL_TSO offload can be enabled.
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE - if supported, the
DEV_TX_OFFLOAD_GRE_TNL_TSO offload can be enabled.
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE - if supported, the
DEV_TX_OFFLOAD_GENEVE_TNL_TSO offload can be enabled.
The fix enables each offload according to the matching capability
flag returned by the kernel.
Fixes: dbccb4cddcd2 ("net/mlx5: convert to new Tx offloads API")
Cc: stable@dpdk.org
Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Tested-by: Idan Hackmon <idanhac@nvidia.com>
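
For reference, a minimal sketch of how these capability flags can be
queried through rdma-core, outside the PMD; the function and variable
names below are illustrative only and are not part of this patch:

    #include <stdio.h>
    #include <infiniband/mlx5dv.h>

    /* Print which tunneled-TSO capabilities the device reports. */
    static void
    print_tunnel_caps(struct ibv_context *ctx)
    {
            struct mlx5dv_context dv_attr = {
                    /* Ask the provider to fill tunnel_offloads_caps. */
                    .comp_mask = MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS,
            };

            if (mlx5dv_query_device(ctx, &dv_attr)) {
                    printf("query failed\n");
                    return;
            }
            /* Old rdma-core/kernel: the field was not filled in. */
            if (!(dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS)) {
                    printf("tunnel offload caps not reported\n");
                    return;
            }
            printf("VXLAN TSO: %s\n", dv_attr.tunnel_offloads_caps &
                   MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ?
                   "yes" : "no");
            printf("GRE TSO: %s\n", dv_attr.tunnel_offloads_caps &
                   MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ?
                   "yes" : "no");
            printf("GENEVE TSO: %s\n", dv_attr.tunnel_offloads_caps &
                   MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ?
                   "yes" : "no");
    }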
int err = 0;
unsigned int hw_padding = 0;
unsigned int mps;
- unsigned int tunnel_en = 0;
unsigned int mpls_en = 0;
unsigned int swp = 0;
unsigned int mprq = 0;
config->cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
- tunnel_en = ((dv_attr.tunnel_offloads_caps &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
- (dv_attr.tunnel_offloads_caps &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
- (dv_attr.tunnel_offloads_caps &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
- }
- DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
- tunnel_en ? "" : "not ");
+ config->tunnel_en = dv_attr.tunnel_offloads_caps &
+ (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
+ }
+ if (config->tunnel_en) {
+ DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
+ config->tunnel_en &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ? "[VXLAN]" : "",
+ config->tunnel_en &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ? "[GRE]" : "",
+ config->tunnel_en &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ? "[GENEVE]" : ""
+ );
+ } else {
+ DRV_LOG(DEBUG, "tunnel offloading is not supported");
+ }
#else
DRV_LOG(WARNING,
"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
- config->tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
mpls_en = ((dv_attr.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
MLX5_SW_PARSING_TSO_CAP = 0,
#endif
};
+
+enum mlx5_tunnel_offloads {
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP =
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN,
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP =
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE,
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP =
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE,
+#else
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP = 0,
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP = 0,
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP = 0,
+#endif
+};
#endif /* RTE_PMD_MLX5_OS_H_ */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int vf:1; /* This is a VF. */
unsigned int sf:1; /* This is a SF. */
- unsigned int tunnel_en:1;
+ unsigned int tunnel_en:3;
/* Whether tunnel stateless offloads are supported. */
unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
if (config->tunnel_en) {
if (config->hw_csum)
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
- if (config->tso)
- offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ if (config->tso) {
+ if (config->tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
+ offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ if (config->tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP)
+ offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+ if (config->tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
+ offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ }
}
if (!config->mprq.enabled)
offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
MLX5_MAX_TSO_HEADER);
txq_ctrl->txq.tso_en = 1;
}
- txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
+ if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+ (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) ||
+ ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+ (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) ||
+ ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+ (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) ||
+ (config->swp & MLX5_SW_PARSING_TSO_CAP))
+ txq_ctrl->txq.tunnel_en = 1;
txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
DEV_TX_OFFLOAD_UDP_TNL_TSO) &
txq_ctrl->txq.offloads) && (config->swp &
MLX5_SW_PARSING_TSO_CAP = 1 << 2,
};
+enum mlx5_tunnel_offloads {
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP = 1 << 0,
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP = 1 << 1,
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP = 1 << 2,
+};
+
#endif /* RTE_PMD_MLX5_OS_H_ */