net/mlx5: fix GRE protocol type translation for Verbs
author    Gregory Etelson <getelson@nvidia.com>  Thu, 23 Dec 2021 13:16:38 +0000 (15:16 +0200)
committer Raslan Darawsheh <rasland@nvidia.com>  Thu, 6 Jan 2022 09:07:49 +0000 (10:07 +0100)
When an application creates several flows to match on a GRE tunnel without
explicitly specifying the GRE protocol type value in the flow rules, the PMD
translates that to a zero mask.
RDMA-CORE cannot distinguish between different inner flow types and
produces identical matchers for each zero mask.
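For illustration, a minimal sketch of two rules that hit the issue (hypothetical
patterns built from the public rte_flow item types; attributes and actions omitted).
Both leave the GRE item without a spec, so before this fix the Verbs path emitted a
zero protocol mask for each, and RDMA-CORE built identical matchers despite the
different inner headers:

#include <rte_flow.h>

/* Rule 1: GRE tunnel carrying inner IPv4; GRE protocol left unspecified. */
static const struct rte_flow_item pattern_inner_v4[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_GRE },   /* no spec/mask given */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* inner IPv4 */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* Rule 2: same outer match, but inner IPv6. */
static const struct rte_flow_item pattern_inner_v6[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_GRE },   /* no spec/mask given */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },  /* inner IPv6 */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};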

The patch extracts the inner header type from the flow rule and forces it
into the GRE protocol type if the application did not specify one.
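A hedged sketch of what this amounts to on the Verbs path: when the GRE item
carries no protocol, the PMD now matches as if the application had written the
protocol derived from the inner layer. The values below assume an inner IPv4
header; the field names come from the public struct rte_flow_item_gre:

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

/* Explicit GRE protocol match equivalent to what the PMD now derives
 * on its own when the rule has an inner IPv4 item after GRE. */
static const struct rte_flow_item_gre gre_spec = {
	.protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4), /* 0x0800, from the inner layer */
};
static const struct rte_flow_item_gre gre_mask = {
	.protocol = RTE_BE16(0xffff),              /* fully masked protocol */
};
static const struct rte_flow_item gre_item = {
	.type = RTE_FLOW_ITEM_TYPE_GRE,
	.spec = &gre_spec,
	.mask = &gre_mask,
};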

Fixes: 84c406e74524 ("net/mlx5: add flow translate function")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8c131d6..125d858 100644
@@ -1450,6 +1450,20 @@ flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
        return ct;
 }
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+       if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+               return RTE_ETHER_TYPE_TEB;
+       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+               return RTE_ETHER_TYPE_IPV4;
+       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+               return RTE_ETHER_TYPE_IPV6;
+       else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+               return RTE_ETHER_TYPE_MPLS;
+       return 0;
+}
+
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
                             const struct mlx5_flow_tunnel *tunnel,
                             uint32_t group, uint32_t *table,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5d9fbff..0383976 100644
@@ -93,20 +93,6 @@ static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);
 
-static inline uint16_t
-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
-{
-       if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
-               return RTE_ETHER_TYPE_TEB;
-       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
-               return RTE_ETHER_TYPE_IPV4;
-       else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
-               return RTE_ETHER_TYPE_IPV6;
-       else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
-               return RTE_ETHER_TYPE_MPLS;
-       return 0;
-}
-
 static int16_t
 flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 29cd694..192a00d 100644
@@ -907,6 +907,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
                .size = size,
        };
 #else
+       static const struct rte_flow_item_gre empty_gre = {0,};
        const struct rte_flow_item_gre *spec = item->spec;
        const struct rte_flow_item_gre *mask = item->mask;
        unsigned int size = sizeof(struct ibv_flow_spec_gre);
@@ -915,17 +916,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
                .size = size,
        };
 
-       if (!mask)
-               mask = &rte_flow_item_gre_mask;
-       if (spec) {
-               tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
-               tunnel.val.protocol = spec->protocol;
-               tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
-               tunnel.mask.protocol = mask->protocol;
-               /* Remove unwanted bits from values. */
-               tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+       if (!spec) {
+               spec = &empty_gre;
+               mask = &empty_gre;
+       } else {
+               if (!mask)
+                       mask = &rte_flow_item_gre_mask;
+       }
+       tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+       tunnel.val.protocol = spec->protocol;
+       tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+       tunnel.mask.protocol = mask->protocol;
+       /* Remove unwanted bits from values. */
+       tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+       tunnel.val.key &= tunnel.mask.key;
+       if (tunnel.mask.protocol) {
                tunnel.val.protocol &= tunnel.mask.protocol;
-               tunnel.val.key &= tunnel.mask.key;
+       } else {
+               tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
+               if (tunnel.val.protocol) {
+                       tunnel.mask.protocol = 0xFFFF;
+                       tunnel.val.protocol =
+                               rte_cpu_to_be_16(tunnel.val.protocol);
+               }
        }
 #endif
        if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
@@ -1803,8 +1816,6 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
-                       flow_verbs_translate_item_gre(dev_flow, items,
-                                                     item_flags);
                        subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
@@ -1820,6 +1831,8 @@ flow_verbs_translate(struct rte_eth_dev *dev,
                                                  NULL, "item not supported");
                }
        }
+       if (item_flags & MLX5_FLOW_LAYER_GRE)
+               flow_verbs_translate_item_gre(dev_flow, items, item_flags);
        dev_flow->handle->layers = item_flags;
        /* Other members of attr will be ignored. */
        dev_flow->verbs.attr.priority =