git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: support ESP item on Windows
author: Raja Zidane <rzidane@nvidia.com>
Thu, 2 Jun 2022 13:03:08 +0000 (16:03 +0300)
committer: Raslan Darawsheh <rasland@nvidia.com>
Sun, 5 Jun 2022 15:04:48 +0000 (17:04 +0200)
ESP item is not supported on Windows, yet it is expanded from the
expansion graph when trying to create a default flow to RSS all packets.

Support ESP item match (without ability to match on SPI field on Windows).
Split ESP validation per OS.

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/linux/mlx5_flow_os.c
drivers/net/mlx5/linux/mlx5_flow_os.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/windows/mlx5_flow_os.c
drivers/net/mlx5/windows/mlx5_flow_os.h

index a5956c255af228975e9e814e58bf0ea7d09fe348..3c9a823edfb09da3ec8c691e40327eb9955b484f 100644 (file)
@@ -9,6 +9,45 @@
 /* Key of thread specific flow workspace data. */
 static rte_thread_key key_workspace;
 
+/*
+ * Validate an ESP flow item (Linux implementation).
+ *
+ * ESP is an L4 layer: an L3 layer must already have been seen, no other
+ * L4 layer may precede it, and any explicit next-protocol in the previous
+ * item must be IPPROTO_ESP.  Full doxygen contract is on the prototype in
+ * linux/mlx5_flow_os.h.
+ */
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error)
+{
+       const struct rte_flow_item_esp *mask = item->mask;
+       /* Pick inner vs. outer L3/L4 bits depending on tunnel context. */
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                     MLX5_FLOW_LAYER_OUTER_L3;
+       const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                     MLX5_FLOW_LAYER_OUTER_L4;
+       int ret;
+
+       if (!(item_flags & l3m))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 is mandatory to filter on L4");
+       if (item_flags & l4m)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "multiple L4 layers not supported");
+       if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with ESP layer");
+       /* A NULL mask means "match with the default ESP mask". */
+       if (!mask)
+               mask = &rte_flow_item_esp_mask;
+       /* Reject mask bits outside what the PMD supports for ESP. */
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_esp_mask,
+                sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
+       if (ret < 0)
+               return ret;
+       return 0;
+}
+
 int
 mlx5_flow_os_init_workspace_once(void)
 {
index e28a9e04360be15db54c0ce2b7a2465a121c3924..bcb48b3e563b4a1fd93ef863771bed5a38f52d57 100644 (file)
@@ -482,4 +482,26 @@ mlx5_os_flow_dr_sync_domain(void *domain, uint32_t flags)
 {
        return mlx5_glue->dr_sync_domain(domain, flags);
 }
+
+/**
+ * Validate ESP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that holds the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error);
+
 #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */
index de0c15fef6d135fc58bb353dc22b26f4dbc6dcc5..5d6c321d95ac1d1574f96e218c84eccde4aa1b6f 100644 (file)
@@ -2646,60 +2646,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
        return 0;
 }
 
-/**
- * Validate ESP item.
- *
- * @param[in] item
- *   Item specification.
- * @param[in] item_flags
- *   Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- *   The next protocol in the previous item.
- * @param[out] error
- *   Pointer to error structure.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
-                           uint64_t item_flags,
-                           uint8_t target_protocol,
-                           struct rte_flow_error *error)
-{
-       const struct rte_flow_item_esp *mask = item->mask;
-       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
-       const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
-                                     MLX5_FLOW_LAYER_OUTER_L3;
-       const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
-                                     MLX5_FLOW_LAYER_OUTER_L4;
-       int ret;
-
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "L3 is mandatory to filter on L4");
-       if (item_flags & l4m)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "multiple L4 layers not supported");
-       if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "protocol filtering not compatible"
-                                         " with ESP layer");
-       if (!mask)
-               mask = &rte_flow_item_esp_mask;
-       ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
-                (const uint8_t *)&rte_flow_item_esp_mask,
-                sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
-                error);
-       if (ret < 0)
-               return ret;
-       return 0;
-}
-
 /**
  * Validate UDP item.
  *
index fcb05abcedcc82724d680100cda80c66c649e1e0..eb13365c61077f374de8a0267a00f933415fd7b7 100644 (file)
@@ -1828,10 +1828,6 @@ int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
                                uint8_t target_protocol,
                                const struct rte_flow_item_tcp *flow_mask,
                                struct rte_flow_error *error);
-int mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
-                               uint64_t item_flags,
-                               uint8_t target_protocol,
-                               struct rte_flow_error *error);
 int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
                                uint64_t item_flags,
                                uint8_t target_protocol,
index bbce67b6768c124a0db4e78f001666ce9095af4f..a575e3182e732513933ebfbc1738ca7fe9677390 100644 (file)
@@ -6957,7 +6957,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ESP:
-                       ret = mlx5_flow_validate_item_esp(items, item_flags,
+                       ret = mlx5_flow_os_validate_item_esp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
index f5e3893ed48970e5e603b22edbd7b6cb1206c59f..48d7da41b17e1d43bcbbf7d868638296512e9158 100644 (file)
@@ -416,3 +416,48 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
        rte_errno = old_err;
        return err;
 }
+
+/*
+ * Validate an ESP flow item (Windows implementation).
+ *
+ * Same L3/L4 layering checks as the Linux variant, plus Windows cannot
+ * match on the ESP SPI field, so any attempt to do so is rejected.
+ * Full doxygen contract is on the prototype in windows/mlx5_flow_os.h.
+ */
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error)
+{
+       const struct rte_flow_item_esp *mask = item->mask;
+       const struct rte_flow_item_esp *spec = item->spec;
+       /* Pick inner vs. outer L3/L4 bits depending on tunnel context. */
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                     MLX5_FLOW_LAYER_OUTER_L3;
+       const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                     MLX5_FLOW_LAYER_OUTER_L4;
+       int ret;
+
+       if (!(item_flags & l3m))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 is mandatory to filter on L4");
+       if (item_flags & l4m)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "multiple L4 layers not supported");
+       if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with ESP layer");
+       /* A NULL mask means "match with the default ESP mask". */
+       if (!mask)
+               mask = &rte_flow_item_esp_mask;
+       /*
+        * SPI matching is not supported on Windows, so reject it here.
+        * NOTE(review): this only trips when spec->hdr.spi has bits set
+        * under the mask; a request to match SPI == 0 (spec 0 with a
+        * non-zero mask) slips through validation even though the match
+        * cannot be honored.  Checking mask->hdr.spi alone would catch
+        * that case too — confirm intended behavior.
+        */
+       if (spec && (spec->hdr.spi & mask->hdr.spi))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "matching on spi field in esp is not"
+                                         " supported on Windows");
+       /* Reject mask bits outside what the PMD supports for ESP. */
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_esp_mask,
+                sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
+       if (ret < 0)
+               return ret;
+       return 0;
+}
index 52013b06a02253f04952915f934ab23275a8490b..347ec645804df1f5a9147110a28b528327ceea27 100644 (file)
@@ -46,6 +46,7 @@ mlx5_flow_os_item_supported(int item)
        case RTE_FLOW_ITEM_TYPE_TCP:
        case RTE_FLOW_ITEM_TYPE_IPV6:
        case RTE_FLOW_ITEM_TYPE_VLAN:
+       case RTE_FLOW_ITEM_TYPE_ESP:
                return true;
        default:
                return false;
@@ -426,4 +427,26 @@ int mlx5_flow_os_create_flow(void *matcher, void *match_value,
                             size_t num_actions,
                             void *actions[], void **flow);
 int mlx5_flow_os_destroy_flow(void *drv_flow_ptr);
+
+/**
+ * Validate ESP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that holds the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error);
+
 #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */