net/mlx5: fix tunneling support query
author Tal Shnaiderman <talshn@nvidia.com>
Tue, 12 Oct 2021 12:45:45 +0000 (15:45 +0300)
committer Raslan Darawsheh <rasland@nvidia.com>
Tue, 12 Oct 2021 13:29:34 +0000 (15:29 +0200)
Currently, the PMD decides whether the VXLAN/GRE/GENEVE tunneled
TSO offloads can be enabled by checking config->tunnel_en (a single
bit) and config->tso.

This is incorrect; the right way is to check the following
flags returned by the mlx5dv_query_device function:

MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN - if supported, the offload
DEV_TX_OFFLOAD_VXLAN_TNL_TSO can be enabled.
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE - if supported, the offload
DEV_TX_OFFLOAD_GRE_TNL_TSO can be enabled.
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE - if supported, the offload
DEV_TX_OFFLOAD_GENEVE_TNL_TSO can be enabled.

The fix enables each offload according to the corresponding
flag returned by the kernel.
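
For illustration, a minimal sketch of the intended query pattern; the
mlx5dv/DPDK symbols are the real ones named above, while ctx (an open
ibv_context on the device) and the surrounding error handling are
placeholders:

    struct mlx5dv_context dv_attr = {
            .comp_mask = MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS,
    };
    uint64_t tnl_tso = 0;

    /* Ask the kernel/rdma-core which tunnel offloads the HW supports. */
    if (mlx5dv_query_device(ctx, &dv_attr) == 0 &&
        (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS)) {
            if (dv_attr.tunnel_offloads_caps &
                MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN)
                    tnl_tso |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
            if (dv_attr.tunnel_offloads_caps &
                MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)
                    tnl_tso |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
            if (dv_attr.tunnel_offloads_caps &
                MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE)
                    tnl_tso |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
    }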

Fixes: dbccb4cddcd2 ("net/mlx5: convert to new Tx offloads API")
Cc: stable@dpdk.org
Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Tested-by: Idan Hackmon <idanhac@nvidia.com>
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/linux/mlx5_os.h
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_txq.c
drivers/net/mlx5/windows/mlx5_os.h

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index e08082e..26a8d75 100644
@@ -963,7 +963,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        int err = 0;
        unsigned int hw_padding = 0;
        unsigned int mps;
-       unsigned int tunnel_en = 0;
        unsigned int mpls_en = 0;
        unsigned int swp = 0;
        unsigned int mprq = 0;
@@ -1144,20 +1143,27 @@ err_secondary:
        config->cqe_comp = 1;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
-               tunnel_en = ((dv_attr.tunnel_offloads_caps &
-                             MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
-                            (dv_attr.tunnel_offloads_caps &
-                             MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
-                            (dv_attr.tunnel_offloads_caps &
-                             MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
-       }
-       DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
-               tunnel_en ? "" : "not ");
+               config->tunnel_en = dv_attr.tunnel_offloads_caps &
+                            (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
+                             MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
+                             MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
+       }
+       if (config->tunnel_en) {
+               DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
+               config->tunnel_en &
+               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ? "[VXLAN]" : "",
+               config->tunnel_en &
+               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ? "[GRE]" : "",
+               config->tunnel_en &
+               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ? "[GENEVE]" : ""
+               );
+       } else {
+               DRV_LOG(DEBUG, "tunnel offloading is not supported");
+       }
 #else
        DRV_LOG(WARNING,
                "tunnel offloading disabled due to old OFED/rdma-core version");
 #endif
-       config->tunnel_en = tunnel_en;
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
        mpls_en = ((dv_attr.tunnel_offloads_caps &
                    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
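
With the reworked log, a device reporting, say, VXLAN and GRE capability
but not GENEVE would emit the following debug line (the exact prefix
depends on the log configuration):

    tunnel offloading is supported for [VXLAN][GRE]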
diff --git a/drivers/net/mlx5/linux/mlx5_os.h b/drivers/net/mlx5/linux/mlx5_os.h
index da036ed..80c70d7 100644
@@ -33,4 +33,19 @@ enum mlx5_sw_parsing_offloads {
        MLX5_SW_PARSING_TSO_CAP  = 0,
 #endif
 };
+
+enum mlx5_tunnel_offloads {
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       MLX5_TUNNELED_OFFLOADS_VXLAN_CAP  =
+               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN,
+       MLX5_TUNNELED_OFFLOADS_GRE_CAP    =
+               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE,
+       MLX5_TUNNELED_OFFLOADS_GENEVE_CAP =
+               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE,
+#else
+       MLX5_TUNNELED_OFFLOADS_VXLAN_CAP  = 0,
+       MLX5_TUNNELED_OFFLOADS_GRE_CAP    = 0,
+       MLX5_TUNNELED_OFFLOADS_GENEVE_CAP = 0,
+#endif
+};
 #endif /* RTE_PMD_MLX5_OS_H_ */
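
The enum above gives shared code OS-neutral names for the rdma-core
capability bits; without HAVE_IBV_DEVICE_TUNNEL_SUPPORT the caps collapse
to 0, so a test like the one used in mlx5_txq.c below compiles everywhere
and is constant-false on old rdma-core:

    if (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
            offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;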
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 483bf35..6dde621 100644
@@ -251,7 +251,7 @@ struct mlx5_dev_config {
        unsigned int hw_padding:1; /* End alignment padding is supported. */
        unsigned int vf:1; /* This is a VF. */
        unsigned int sf:1; /* This is a SF. */
-       unsigned int tunnel_en:1;
+       unsigned int tunnel_en:3;
        /* Whether tunnel stateless offloads are supported. */
        unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
        unsigned int cqe_comp:1; /* CQE compression is enabled. */
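
Three bits are sufficient here because the capability flags are 1 << 0,
1 << 1 and 1 << 2 (see the Windows header at the end of this patch, which
mirrors the rdma-core values), so the whole mask fits in tunnel_en:3.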
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index eb26367..1f92250 100644
@@ -120,10 +120,17 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
        if (config->tunnel_en) {
                if (config->hw_csum)
                        offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-               if (config->tso)
-                       offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                    DEV_TX_OFFLOAD_GRE_TNL_TSO |
-                                    DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+               if (config->tso) {
+                       if (config->tunnel_en &
+                               MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
+                               offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+                       if (config->tunnel_en &
+                               MLX5_TUNNELED_OFFLOADS_GRE_CAP)
+                               offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
+                       if (config->tunnel_en &
+                               MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
+                               offloads |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+               }
        }
        if (!config->mprq.enabled)
                offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
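
As a consequence, rte_eth_dev_info_get() no longer advertises tunneled
TSO flags the device cannot honor. A hedged application-side sketch
(port_id and tx_offloads are illustrative):

    struct rte_eth_dev_info dev_info;
    uint64_t tx_offloads = 0;

    if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
        (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
            /* VXLAN tunneled TSO is really supported on this port. */
            tx_offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;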
@@ -971,7 +978,14 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                                                    MLX5_MAX_TSO_HEADER);
                txq_ctrl->txq.tso_en = 1;
        }
-       txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
+       if (((DEV_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
+           (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
+          ((DEV_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
+           (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
+          ((DEV_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
+           (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
+          (config->swp  & MLX5_SW_PARSING_TSO_CAP))
+               txq_ctrl->txq.tunnel_en = 1;
        txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
                                  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
                                  txq_ctrl->txq.offloads) && (config->swp &
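
In words: the per-queue tunnel_en is now set only when at least one
tunneled TSO offload is both requested on the queue and backed by the
matching HW capability, or when SW parsing can provide tunneled TSO; the
bitwise '|' between the parenthesized terms is safe because each term
evaluates to zero or nonzero.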
diff --git a/drivers/net/mlx5/windows/mlx5_os.h b/drivers/net/mlx5/windows/mlx5_os.h
index 6de6833..8b58265 100644
@@ -22,4 +22,10 @@ enum mlx5_sw_parsing_offloads {
        MLX5_SW_PARSING_TSO_CAP =  1 << 2,
 };
 
+enum mlx5_tunnel_offloads {
+       MLX5_TUNNELED_OFFLOADS_VXLAN_CAP  = 1 << 0,
+       MLX5_TUNNELED_OFFLOADS_GRE_CAP    = 1 << 1,
+       MLX5_TUNNELED_OFFLOADS_GENEVE_CAP = 1 << 2,
+};
+
 #endif /* RTE_PMD_MLX5_OS_H_ */
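
On Windows there is no rdma-core, so the capability bits are defined
directly; their values (1 << 0 through 1 << 2) match the corresponding
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_* flags, which keeps shared code
such as the mlx5_txq.c checks above OS-independent.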