net/mlx5: query tunneling support on Windows
authorTal Shnaiderman <talshn@nvidia.com>
Tue, 12 Oct 2021 12:45:47 +0000 (15:45 +0300)
committerRaslan Darawsheh <rasland@nvidia.com>
Tue, 12 Oct 2021 13:29:36 +0000 (15:29 +0200)
Query the tunneling offloads supported by the NIC.

Save the offloads values in a config parameter.
This is needed for the following TSO support:

DEV_TX_OFFLOAD_VXLAN_TNL_TSO
DEV_TX_OFFLOAD_GRE_TNL_TSO
DEV_TX_OFFLOAD_GENEVE_TNL_TSO

Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Tested-by: Idan Hackmon <idanhac@nvidia.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/windows/mlx5_os.c

index 859637f..c712fc3 100644 (file)
@@ -969,6 +969,20 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
        return sw_parsing_offloads;
 }
 
+/**
+ * Translate the tunneling capabilities reported in the queried HCA
+ * attributes into a bitmask of MLX5_TUNNELED_OFFLOADS_*_CAP flags.
+ *
+ * @param attr
+ *   Pointer to the queried device HCA attributes (read-only).
+ *
+ * @return
+ *   Bitmask of the supported stateless tunnel offloads
+ *   (VXLAN, GRE, GENEVE); 0 when none are supported.
+ */
+uint32_t
+mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
+{
+       uint32_t tn_offloads = 0;
+
+       if (attr->tunnel_stateless_vxlan)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
+       if (attr->tunnel_stateless_gre)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
+       if (attr->tunnel_stateless_geneve_rx)
+               tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
+       return tn_offloads;
+}
+
 /*
  * Allocate Rx and Tx UARs in robust fashion.
  * This routine handles the following UAR allocation issues:
index 6dde621..68acc01 100644 (file)
@@ -1831,5 +1831,7 @@ int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
                          struct mlx5_aso_ct_action *ct);
 uint32_t
 mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
+uint32_t
+mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
 
 #endif /* RTE_PMD_MLX5_H_ */
index fefc648..1eaf261 100644 (file)
@@ -171,6 +171,8 @@ mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
        }
        device_attr->sw_parsing_offloads =
                mlx5_get_supported_sw_parsing_offloads(&hca_attr);
+       device_attr->tunnel_offloads_caps =
+               mlx5_get_supported_tunneling_offloads(&hca_attr);
        pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
        if (pv_iseg == NULL) {
                DRV_LOG(ERR, "Failed to get device hca_iseg");
@@ -402,8 +404,22 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                sh->device_attr.max_rwq_indirection_table_size;
        cqe_comp = 0;
        config->cqe_comp = cqe_comp;
-       DRV_LOG(DEBUG, "tunnel offloading is not supported");
-       config->tunnel_en = 0;
+       config->tunnel_en = device_attr.tunnel_offloads_caps &
+               (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
+                MLX5_TUNNELED_OFFLOADS_GRE_CAP |
+                MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
+       if (config->tunnel_en) {
+               DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
+               config->tunnel_en &
+               MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
+               config->tunnel_en &
+               MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
+               config->tunnel_en &
+               MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
+               );
+       } else {
+               DRV_LOG(DEBUG, "tunnel offloading is not supported");
+       }
        DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported");
        config->mpls_en = 0;
        /* Allocate private eth device data. */