net/mlx5: introduce hardware steering operation
author    Suanming Mou <suanmingm@nvidia.com>
          Thu, 24 Feb 2022 13:40:38 +0000 (15:40 +0200)
committer Raslan Darawsheh <rasland@nvidia.com>
          Thu, 24 Feb 2022 21:10:15 +0000 (22:10 +0100)
The Connect-X steering is a hardware lookup mechanism that accesses flow
tables, matches packets against the rules, and performs the specified
actions.
Historically, the mlx5 PMD has implemented several software engines to
manage the steering hardware facility:

   - FW Steering - Verbs/Direct Verbs, uses FW calls to manage flows
   - SW Steering - DevX/mlx5dv, uses WQEs to access table memory directly

However, these engines have some disadvantages:

   - performance is limited: firmware has to be invoked either to
     manage the entire flow or to handle some internal steering objects

   - organizing and preparing the flow infrastructure (actions, matchers,
     groups, etc.) at flow insertion time inevitably makes flow insertion
     slow

   - security: exposing the low-level steering entries directly to
     userspace may introduce security risks

A new hardware WQE-based steering engine, codenamed "HW Steering", is
introduced to eliminate these security risks. It takes advantage of the
recently introduced asynchronous queue-based rte_flow APIs to prepare
everything in advance and achieve a high insertion rate.
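
For context, here is a minimal sketch (not part of this patch) of how an
application is expected to drive those asynchronous queue-based rte_flow
calls, following the DPDK 22.03 definitions. The template table 'tbl' and
the pattern/actions arrays with their template indexes are assumed to
have been prepared beforehand with the rte_flow template APIs, and the
helper name is purely illustrative:

#include <rte_flow.h>

/* Enqueue one pre-templated rule on flow queue 0 and wait for its result. */
static int
enqueue_one_rule(uint16_t port_id, struct rte_flow_template_table *tbl,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[])
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	struct rte_flow_op_result res[1];
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	/* Enqueue the rule creation on flow queue 0 (non-blocking). */
	flow = rte_flow_async_create(port_id, 0, &op_attr, tbl,
				     pattern, 0, actions, 0,
				     NULL, &error);
	if (flow == NULL)
		return -1;
	/* Push the queued operations to the hardware. */
	rte_flow_push(port_id, 0, &error);
	/* Poll the queue until the creation result comes back. */
	do {
		ret = rte_flow_pull(port_id, 0, res, 1, &error);
	} while (ret == 0);
	return ret < 0 ? -1 : 0;
}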

In the first implementation of this new HW steering engine, the original
synchronous rte_flow API is not supported; only the new asynchronous
queue-based flow operations are supported. A new steering mode value for
the dv_flow_en devarg will be introduced so that users can engage the
new steering engine.
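
As a companion sketch (again not part of this patch), the flow queues
used by the enqueue/poll example above are pre-allocated with
rte_flow_configure() once the port has been probed with the new
dv_flow_en steering mode; the helper name and the single-queue setup are
illustrative assumptions:

#include <rte_flow.h>

/* Reserve one flow queue of 'queue_size' entries for async operations. */
static int
setup_flow_queue(uint16_t port_id, uint32_t queue_size)
{
	const struct rte_flow_port_attr port_attr = { 0 };
	const struct rte_flow_queue_attr queue_attr = { .size = queue_size };
	const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
	struct rte_flow_error error;

	/* Call after rte_eth_dev_configure() and before starting the port. */
	return rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error);
}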

This commit adds the basic driver operations: the new MLX5_FLOW_TYPE_HW
driver type and an empty mlx5_flow_hw_drv_ops placeholder registered in
the flow_drv_ops[] table.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/linux/mlx5_flow_os.h
drivers/net/mlx5/meson.build
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_hw.c [new file with mode: 0644]
drivers/net/mlx5/windows/mlx5_flow_os.h

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.h b/drivers/net/mlx5/linux/mlx5_flow_os.h
index 1926d26..e28a9e0 100644
@@ -9,6 +9,7 @@
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
+extern const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
 #endif
 
 /**
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 2f6d8cb..39a2b8c 100644
@@ -16,6 +16,7 @@ sources = files(
         'mlx5_flow.c',
         'mlx5_flow_meter.c',
         'mlx5_flow_dv.c',
+        'mlx5_flow_hw.c',
         'mlx5_flow_aso.c',
         'mlx5_flow_flex.c',
         'mlx5_mac.c',
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a87ac8e..b289f13 100644
@@ -76,6 +76,7 @@ const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+       [MLX5_FLOW_TYPE_HW] = &mlx5_flow_hw_drv_ops,
 #endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index a20773e..b70ef0c 100644
@@ -452,6 +452,7 @@ enum mlx5_flow_drv_type {
        MLX5_FLOW_TYPE_MIN,
        MLX5_FLOW_TYPE_DV,
        MLX5_FLOW_TYPE_VERBS,
+       MLX5_FLOW_TYPE_HW,
        MLX5_FLOW_TYPE_MAX,
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
new file mode 100644 (file)
index 0000000..729d591
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <rte_flow.h>
+
+#include "mlx5_flow.h"
+
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+
+const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
+
+#endif
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.h b/drivers/net/mlx5/windows/mlx5_flow_os.h
index dfcb012..52013b0 100644
@@ -10,6 +10,7 @@
 
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
+extern const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
 #endif
 
 /**