mbuf: add namespace to offload flags
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
index 549b305..5d19ef1 100644 (file)
@@ -98,12 +98,27 @@ struct mlx5_flow_expand_node {
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
-        * (see ETH_RSS_* definitions).
+        * (see RTE_ETH_RSS_* definitions).
+        */
+       uint64_t node_flags;
+       /**<
+        * Bit-fields that define how the node is used in the expansion
+        * (see MLX5_EXPANSION_NODE_* definitions).
         */
-       uint8_t optional;
-       /**< optional expand field. Default 0 to expand, 1 not go deeper. */
 };
 
+/* Optional expansion node: the expansion algorithm does not go deeper. */
+#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
+
+/* The node is not added implicitly as an expansion of the flow pattern.
+ * If the node type does not match the flow pattern item type, the
+ * expansion algorithm goes deeper into its next items.
+ * In the current implementation, a list of next node indexes can
+ * contain at most one node with this flag set, and it has to be the
+ * last node index (before the list terminator).
+ */
+#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)
+
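A minimal standalone sketch of how these two bits are meant to be tested (the macro names are copied from the definitions above; everything else is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
#define MLX5_EXPANSION_NODE_EXPLICIT (UINT64_C(1) << 1)

int main(void)
{
        uint64_t node_flags = MLX5_EXPANSION_NODE_EXPLICIT;

        /* EXPLICIT: emitted only when the user pattern names the item. */
        if (node_flags & MLX5_EXPANSION_NODE_EXPLICIT)
                printf("explicit node, skipped unless matched\n");
        /* OPTIONAL: a leaf for the expansion; do not go deeper. */
        if (!(node_flags & MLX5_EXPANSION_NODE_OPTIONAL))
                printf("expansion may go deeper\n");
        return 0;
}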
 /** Object returned by mlx5_flow_expand_rss(). */
 struct mlx5_flow_expand_rss {
        uint32_t entries;
@@ -117,6 +132,12 @@ struct mlx5_flow_expand_rss {
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item);
 
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+               unsigned int item_idx,
+               const struct mlx5_flow_expand_node graph[],
+               const struct mlx5_flow_expand_node *node);
+
 static bool
 mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
 {
@@ -135,6 +156,7 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item)
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
        case RTE_FLOW_ITEM_TYPE_GRE_KEY:
        case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+       case RTE_FLOW_ITEM_TYPE_GTP:
                return true;
        default:
                break;
@@ -243,6 +265,26 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
        return ret;
 }
 
+static const int *
+mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[],
+               const int *next_node)
+{
+       const struct mlx5_flow_expand_node *node = NULL;
+       const int *next = next_node;
+
+       while (next && *next) {
+               /*
+                * Skip the nodes with the MLX5_EXPANSION_NODE_EXPLICIT
+                * flag set, because they were not found in the flow pattern.
+                */
+               node = &graph[*next];
+               if (!(node->node_flags & MLX5_EXPANSION_NODE_EXPLICIT))
+                       break;
+               next = node->next;
+       }
+       return next;
+}
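The helper walks a next-node list and returns the first entry whose node is not EXPLICIT, following an explicit node's own next list whenever it is skipped. A standalone model with a toy two-node graph (all values hypothetical; index 0 terminates a list, as in the driver):

#include <stdint.h>
#include <stdio.h>

#define NODE_EXPLICIT (UINT64_C(1) << 1)

struct toy_node {
        uint64_t node_flags;
        const int *next;        /* zero-terminated list of node indexes */
};

static const int *
skip_explicit(const struct toy_node graph[], const int *next_node)
{
        const int *next = next_node;

        while (next && *next) {
                const struct toy_node *node = &graph[*next];

                if (!(node->node_flags & NODE_EXPLICIT))
                        break;
                next = node->next;
        }
        return next;
}

int main(void)
{
        static const int after_vlan[] = { 2, 0 };
        /* Node 1 models an EXPLICIT VLAN node, node 2 an implicit one. */
        static const struct toy_node graph[] = {
                [1] = { NODE_EXPLICIT, after_vlan },
                [2] = { 0, NULL },
        };
        static const int start[] = { 1, 0 };

        printf("first implicit node: %d\n", *skip_explicit(graph, start));
        return 0;
}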
+
 #define MLX5_RSS_EXP_ELT_N 16
 
 /**
@@ -256,7 +298,7 @@ mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
  * @param[in] pattern
  *   User flow pattern.
  * @param[in] types
- *   RSS types to expand (see ETH_RSS_* definitions).
+ *   RSS types to expand (see RTE_ETH_RSS_* definitions).
  * @param[in] graph
  *   Input graph to expand @p pattern according to @p types.
  * @param[in] graph_root_index
@@ -282,7 +324,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        const int *stack[MLX5_RSS_EXP_ELT_N];
        int stack_pos = 0;
        struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
-       unsigned int i;
+       unsigned int i, item_idx, last_expand_item_idx = 0;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;
@@ -290,7 +332,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        struct rte_flow_item missed_item;
        int missed = 0;
        int elt = 0;
-       const struct rte_flow_item *last_item = NULL;
+       const struct rte_flow_item *last_expand_item = NULL;
 
        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
@@ -301,16 +343,26 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
        buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
        buf->entries = 0;
        addr = buf->entry[0].pattern;
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+       for (item = pattern, item_idx = 0;
+                       item->type != RTE_FLOW_ITEM_TYPE_END;
+                       item++, item_idx++) {
                if (!mlx5_flow_is_rss_expandable_item(item)) {
                        user_pattern_size += sizeof(*item);
                        continue;
                }
-               last_item = item;
-               for (i = 0; node->next && node->next[i]; ++i) {
+               last_expand_item = item;
+               last_expand_item_idx = item_idx;
+               i = 0;
+               while (node->next && node->next[i]) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
+                       if (next->node_flags & MLX5_EXPANSION_NODE_EXPLICIT) {
+                               node = next;
+                               i = 0;
+                       } else {
+                               ++i;
+                       }
                }
                if (next)
                        node = next;
@@ -331,7 +383,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
         * Check if the last valid item has spec set, need complete pattern,
         * and the pattern can be used for expansion.
         */
-       missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
+       missed_item.type = mlx5_flow_expand_rss_item_complete(last_expand_item);
        if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
                /* Item type END indicates expansion is not required. */
                return lsize;
@@ -366,9 +418,13 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
+       } else if (last_expand_item != NULL) {
+               node = mlx5_flow_expand_rss_adjust_node(pattern,
+                               last_expand_item_idx, graph, node);
        }
        memset(flow_items, 0, sizeof(flow_items));
-       next_node = node->next;
+       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                       node->next);
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
@@ -403,8 +459,10 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        addr = (void *)(((uintptr_t)addr) + n);
                }
                /* Go deeper. */
-               if (!node->optional && node->next) {
-                       next_node = node->next;
+               if (!(node->node_flags & MLX5_EXPANSION_NODE_OPTIONAL) &&
+                               node->next) {
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       node->next);
                        if (stack_pos++ == MLX5_RSS_EXP_ELT_N) {
                                rte_errno = E2BIG;
                                return -rte_errno;
@@ -412,15 +470,27 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       ++next_node);
+               } else if (!stack_pos) {
+                       /*
+                        * Complete the traversal over all the different
+                        * paths: advance next_node to the terminator.
+                        */
                        ++next_node;
                } else {
                        /* Move to the next path. */
-                       if (stack_pos)
+                       while (stack_pos) {
                                next_node = stack[--stack_pos];
-                       next_node++;
+                               next_node++;
+                               if (*next_node)
+                                       break;
+                       }
+                       next_node = mlx5_flow_expand_rss_skip_explicit(graph,
+                                       next_node);
                        stack[stack_pos] = next_node;
                }
-               node = *next_node ? &graph[*next_node] : NULL;
+               node = next_node && *next_node ? &graph[*next_node] : NULL;
        };
        return lsize;
 }
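Conceptually, every node visited from the matched position contributes one expanded pattern entry. A simplified standalone model (recursive for brevity, toy graph; the real code uses the explicit stack above and additionally honors the OPTIONAL/EXPLICIT flags and the requested RSS types):

#include <stdio.h>

struct toy_node {
        const char *name;
        const int *next;        /* zero-terminated list of node indexes */
};

static const int eth_next[] = { 2, 3, 0 };
static const struct toy_node graph[] = {
        [1] = { "eth",  eth_next },
        [2] = { "ipv4", NULL },
        [3] = { "ipv6", NULL },
};

static void
expand(int idx, char *buf, size_t pos, size_t size)
{
        const struct toy_node *node = &graph[idx];
        int n = snprintf(buf + pos, size - pos, "/%s", node->name);

        printf("%s\n", buf);    /* every visited node yields one entry */
        if (!node->next)
                return;
        for (const int *next = node->next; *next; next++)
                expand(*next, buf, pos + n, size);
}

int main(void)
{
        char buf[64];

        buf[0] = '\0';
        /* Prints /eth, /eth/ipv4, /eth/ipv6: three candidate patterns. */
        expand(1, buf, 0, sizeof(buf));
        return 0;
}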
@@ -428,10 +498,7 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
 enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
-       MLX5_EXPANSION_ROOT_ETH_VLAN,
-       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
-       MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
@@ -440,13 +507,14 @@ enum mlx5_expansion {
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
+       MLX5_EXPANSION_STD_VXLAN,
+       MLX5_EXPANSION_L3_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_NVGRE,
        MLX5_EXPANSION_GRE_KEY,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
-       MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
@@ -455,6 +523,7 @@ enum mlx5_expansion {
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
        MLX5_EXPANSION_IPV6_FRAG_EXT,
+       MLX5_EXPANSION_GTP
 };
 
 /** Supported expansion of items. */
@@ -471,22 +540,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
-       [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
-               .type = RTE_FLOW_ITEM_TYPE_END,
-       },
-       [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT
-                                               (MLX5_EXPANSION_OUTER_ETH_VLAN),
-               .type = RTE_FLOW_ITEM_TYPE_END,
-       },
        [MLX5_EXPANSION_OUTER_ETH] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
-                                                 MLX5_EXPANSION_OUTER_IPV6),
-               .type = RTE_FLOW_ITEM_TYPE_ETH,
-               .rss_types = 0,
-       },
-       [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
@@ -495,6 +549,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
+               .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -505,19 +560,20 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE,
-                                                 MLX5_EXPANSION_MPLS),
+                                                 MLX5_EXPANSION_MPLS,
+                                                 MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
@@ -528,19 +584,20 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_NVGRE),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE,
-                                                 MLX5_EXPANSION_MPLS),
+                                                 MLX5_EXPANSION_MPLS,
+                                                 MLX5_EXPANSION_GTP),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
@@ -548,6 +605,15 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
+       [MLX5_EXPANSION_STD_VXLAN] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
+       [MLX5_EXPANSION_L3_VXLAN] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
@@ -566,7 +632,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6,
                                                  MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
-               .optional = 1,
+               .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
        },
        [MLX5_EXPANSION_NVGRE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
@@ -577,13 +643,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                                                  MLX5_EXPANSION_IPV6,
                                                  MLX5_EXPANSION_ETH),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
+               .node_flags = MLX5_EXPANSION_NODE_OPTIONAL,
        },
        [MLX5_EXPANSION_ETH] = {
-               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
-                                                 MLX5_EXPANSION_IPV6),
-               .type = RTE_FLOW_ITEM_TYPE_ETH,
-       },
-       [MLX5_EXPANSION_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
@@ -591,41 +653,47 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
+               .node_flags = MLX5_EXPANSION_NODE_EXPLICIT,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
-               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
-                       ETH_RSS_NONFRAG_IPV4_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+                       RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                  MLX5_EXPANSION_IPV6_TCP,
                                                  MLX5_EXPANSION_IPV6_FRAG_EXT),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
-               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
-                       ETH_RSS_NONFRAG_IPV6_OTHER,
+               .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+                       RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
-               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+               .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_IPV6_FRAG_EXT] = {
                .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
        },
+       [MLX5_EXPANSION_GTP] = {
+               .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                 MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_GTP,
+       },
 };
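With MLX5_EXPANSION_GTP reachable from both outer UDP nodes, inner RSS can now be expanded for GTP-encapsulated traffic. A hypothetical rte_flow snippet that would exercise this path (device setup and error handling omitted; the helper name, port_id and queue layout are assumptions):

#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_gtp_inner_rss_flow(uint16_t port_id, const uint16_t queues[],
                          uint32_t nb_queues, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_GTP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_rss rss = {
                .level = 2,                     /* hash on inner headers */
                .types = RTE_ETH_RSS_IP,
                .queue = queues,
                .queue_num = nb_queues,
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}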
 
 static struct rte_flow_action_handle *
@@ -935,13 +1003,15 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
  *   Pointer to device flow rule attributes.
  * @param[in] subpriority
  *   The priority based on the items.
+ * @param[in] external
+ *   True if the flow is created by a user application.
  * @return
  *   The matcher priority of the flow.
  */
 uint16_t
 mlx5_get_matcher_priority(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
-                         uint32_t subpriority)
+                         uint32_t subpriority, bool external)
 {
        uint16_t priority = (uint16_t)attr->priority;
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -950,6 +1020,9 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                        priority = priv->config.flow_prio - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
+       } else if (!external && attr->transfer && attr->group == 0 &&
+                  attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
+               return (priv->config.flow_prio - 1) * 3;
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
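The new branch pins internal (non-external) transfer rules in the FDB root table that request the lowest priority to (flow_prio - 1) * 3. A toy check of the arithmetic, assuming a hypothetical flow_prio of 16 (reading the factor 3 as the per-priority sub-priority spread is an inference, not something this hunk states):

#include <stdio.h>

int main(void)
{
        unsigned int flow_prio = 16;    /* hypothetical configured value */

        /* Lowest matcher priority for an internal FDB root rule. */
        printf("matcher priority = %u\n", (flow_prio - 1) * 3);
        return 0;
}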
@@ -1027,7 +1100,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
  * @param[in] tunnel
  *   1 when the hash field is for a tunnel item.
  * @param[in] layer_types
- *   ETH_RSS_* types.
+ *   RTE_ETH_RSS_* types.
  * @param[in] hash_fields
  *   Item hash fields.
  *
@@ -1321,9 +1394,7 @@ mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
                        data->dynf_meta = 1;
                        data->flow_meta_mask = rte_flow_dynf_metadata_mask;
                        data->flow_meta_offset = rte_flow_dynf_metadata_offs;
-                       data->flow_meta_port_mask = (uint32_t)~0;
-                       if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
-                               data->flow_meta_port_mask >>= 16;
+                       data->flow_meta_port_mask = priv->sh->dv_meta_mask;
                }
        }
 }
@@ -1582,14 +1653,14 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
-       if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
-           !(rss->types & ETH_RSS_IP))
+       if ((rss->types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY)) &&
+           !(rss->types & RTE_ETH_RSS_IP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L3 partial RSS requested but L3 RSS"
                                          " type not specified");
-       if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
-           !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+       if ((rss->types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) &&
+           !(rss->types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
@@ -1992,6 +2063,10 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L2 layer should not follow VLAN");
+       if (item_flags & MLX5_FLOW_LAYER_GTP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L2 layer should not follow GTP");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -2420,6 +2495,8 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
+ * @param[in] udp_dport
+ *   UDP destination port.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
@@ -2434,6 +2511,7 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
  */
 int
 mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
+                             uint16_t udp_dport,
                              const struct rte_flow_item *item,
                              uint64_t item_flags,
                              const struct rte_flow_attr *attr,
@@ -2469,12 +2547,18 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
                                          "no outer UDP layer found");
        if (!mask)
                mask = &rte_flow_item_vxlan_mask;
-       /* FDB domain & NIC domain non-zero group */
-       if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
-               valid_mask = &nic_mask;
-       /* Group zero in NIC domain */
-       if (!attr->group && !attr->transfer && priv->sh->tunnel_header_0_1)
-               valid_mask = &nic_mask;
+
+       if (priv->sh->steering_format_version !=
+           MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+           !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
+               /* FDB domain & NIC domain non-zero group */
+               if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+                       valid_mask = &nic_mask;
+               /* Group zero in NIC domain */
+               if (!attr->group && !attr->transfer &&
+                   priv->sh->tunnel_header_0_1)
+                       valid_mask = &nic_mask;
+       }
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)valid_mask,
@@ -3515,6 +3599,8 @@ flow_get_rss_action(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = NULL;
+       struct mlx5_meter_policy_action_container *acg;
+       struct mlx5_meter_policy_action_container *acy;
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
@@ -3550,9 +3636,18 @@ flow_get_rss_action(struct rte_eth_dev *dev,
                                        if (!policy)
                                                return NULL;
                                }
-                               if (policy->is_rss)
-                                       rss =
-                               policy->act_cnt[RTE_COLOR_GREEN].rss->conf;
+                               if (policy->is_rss) {
+                                       acg =
+                                       &policy->act_cnt[RTE_COLOR_GREEN];
+                                       acy =
+                                       &policy->act_cnt[RTE_COLOR_YELLOW];
+                                       if (acg->fate_action ==
+                                           MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acg->rss->conf;
+                                       else if (acy->fate_action ==
+                                                MLX5_FLOW_FATE_SHARED_RSS)
+                                               rss = acy->rss->conf;
+                               }
                        }
                        break;
                }
@@ -3753,20 +3848,8 @@ flow_get_shared_rss_action(struct rte_eth_dev *dev,
 }
 
 static unsigned int
-find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+find_graph_root(uint32_t rss_level)
 {
-       const struct rte_flow_item *item;
-       unsigned int has_vlan = 0;
-
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-                       has_vlan = 1;
-                       break;
-               }
-       }
-       if (has_vlan)
-               return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
-                                      MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
        return rss_level < 2 ? MLX5_EXPANSION_ROOT :
                               MLX5_EXPANSION_ROOT_OUTER;
 }
@@ -4698,7 +4781,7 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
                        uint8_t fate = final_policy->act_cnt[i].fate_action;
 
                        if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
-                               const void *rss_act =
+                               const struct rte_flow_action_rss *rss_act =
                                        final_policy->act_cnt[i].rss->conf;
                                struct rte_flow_action rss_actions[2] = {
                                        [0] = {
@@ -4734,6 +4817,9 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
                                rss_desc_v[i].tunnel =
                                                !!(dev_flow.handle->layers &
                                                   MLX5_FLOW_LAYER_TUNNEL);
+                               /* Use the RSS queues in the containers. */
+                               rss_desc_v[i].queue =
+                                       (uint16_t *)(uintptr_t)rss_act->queue;
                                rss_desc[i] = &rss_desc_v[i];
                        } else if (fate == MLX5_FLOW_FATE_QUEUE) {
                                /* This is queue action. */
@@ -5248,6 +5334,7 @@ flow_check_match_action(const struct rte_flow_action actions[],
                        int *modify_after_mirror)
 {
        const struct rte_flow_action_sample *sample;
+       const struct rte_flow_action_raw_decap *decap;
        int actions_n = 0;
        uint32_t ratio = 0;
        int sub_type = 0;
@@ -5300,12 +5387,29 @@ flow_check_match_action(const struct rte_flow_action actions[],
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
-               case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
                case RTE_FLOW_ACTION_TYPE_METER:
                        if (fdb_mirror)
                                *modify_after_mirror = 1;
                        break;
+               case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+                       decap = actions->conf;
+                       while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+                               ;
+                       actions_n++;
+                       if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+                               const struct rte_flow_action_raw_encap *encap =
+                                                               actions->conf;
+                               if (decap->size <=
+                                       MLX5_ENCAPSULATION_DECISION_SIZE &&
+                                   encap->size >
+                                       MLX5_ENCAPSULATION_DECISION_SIZE)
+                                       /* L3 encap. */
+                                       break;
+                       }
+                       if (fdb_mirror)
+                               *modify_after_mirror = 1;
+                       break;
                default:
                        break;
                }
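The RAW_DECAP branch above looks ahead past VOID actions for a paired RAW_ENCAP and classifies the pair by size: removing at most MLX5_ENCAPSULATION_DECISION_SIZE bytes and pushing more than that is treated as an L3 tunnel rewrite, which does not need the modify-after-mirror handling. A standalone model of the size test (the 14-byte threshold is a stand-in, not the real constant):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DECISION_SIZE 14 /* stand-in for MLX5_ENCAPSULATION_DECISION_SIZE */

static bool
is_l3_encap(size_t decap_size, size_t encap_size)
{
        return decap_size <= DECISION_SIZE && encap_size > DECISION_SIZE;
}

int main(void)
{
        /* Strip L2 (14 bytes), push a 50-byte tunnel header: L3 encap. */
        printf("%s\n", is_l3_encap(14, 50) ? "L3 encap" : "not L3 encap");
        return 0;
}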
@@ -6239,7 +6343,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
        int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
        union {
                struct mlx5_flow_expand_rss buf;
-               uint8_t buffer[2048];
+               uint8_t buffer[4096];
        } expand_buffer;
        union {
                struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
@@ -6323,14 +6427,14 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
                 * mlx5_flow_hashfields_adjust() in advance.
                 */
                rss_desc->level = rss->level;
-               /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-               rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
+               /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
+               rss_desc->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
        }
        flow->dev_handles = 0;
        if (rss && rss->types) {
                unsigned int graph_root;
 
-               graph_root = find_graph_root(items, rss->level);
+               graph_root = find_graph_root(rss->level);
                ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
                                           items, rss->types,
                                           mlx5_support_expansion, graph_root);
@@ -6503,6 +6607,80 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
                                                   actions, false, &error);
 }
 
+/**
+ * Create a dedicated flow rule on e-switch table 1 that matches the
+ * e-switch manager and SQ number, and directs all packets to the peer vport.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param txq
+ *   Txq index.
+ *
+ * @return
+ *   Flow ID on success, 0 otherwise and rte_errno is set.
+ */
+uint32_t
+mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
+{
+       struct rte_flow_attr attr = {
+               .group = 0,
+               .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
+               .ingress = 1,
+               .egress = 0,
+               .transfer = 1,
+       };
+       struct rte_flow_item_port_id port_spec = {
+               .id = MLX5_PORT_ESW_MGR,
+       };
+       struct mlx5_rte_flow_item_tx_queue txq_spec = {
+               .queue = txq,
+       };
+       struct rte_flow_item pattern[] = {
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
+                       .spec = &port_spec,
+               },
+               {
+                       .type = (enum rte_flow_item_type)
+                               MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+                       .spec = &txq_spec,
+               },
+               {
+                       .type = RTE_FLOW_ITEM_TYPE_END,
+               },
+       };
+       struct rte_flow_action_jump jump = {
+               .group = 1,
+       };
+       struct rte_flow_action_port_id port = {
+               .id = dev->data->port_id,
+       };
+       struct rte_flow_action actions[] = {
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_JUMP,
+                       .conf = &jump,
+               },
+               {
+                       .type = RTE_FLOW_ACTION_TYPE_END,
+               },
+       };
+       struct rte_flow_error error;
+
+       /*
+        * Create the group 0, highest-priority jump flow.
+        * It matches the Tx queue number to bypass kernel packets.
+        */
+       if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
+                            false, &error) == 0)
+               return 0;
+       /* Create group 1, lowest-priority redirect flow for the Tx queue. */
+       attr.group = 1;
+       actions[0].conf = &port;
+       actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
+       return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
+                               actions, false, &error);
+}
+
 /**
  * Validate a flow supported by the NIC.
  *
@@ -6948,7 +7126,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        if (!priv->reta_idx_n || !priv->rxqs_n) {
                return 0;
        }
-       if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+       if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                action_rss.types = 0;
        for (i = 0; i != priv->reta_idx_n; ++i)
                queue[i] = (*priv->reta_idx)[i];
@@ -7563,7 +7741,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        }
        mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
        size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-       mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
+       mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
                                                 IBV_ACCESS_LOCAL_WRITE);
        if (!mem_mng->umem) {
                rte_errno = errno;
@@ -7574,10 +7752,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        mkey_attr.addr = (uintptr_t)mem;
        mkey_attr.size = size;
        mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-       mkey_attr.pd = sh->pdn;
+       mkey_attr.pd = sh->cdev->pdn;
        mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
        mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
-       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
        if (!mem_mng->dm) {
                mlx5_os_umem_dereg(mem_mng->umem);
                rte_errno = errno;
@@ -8616,7 +8794,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
                                (error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                NULL, "invalid port configuration");
-               if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+               if (!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
                        ctx->action_rss.types = 0;
                for (i = 0; i != priv->reta_idx_n; ++i)
                        ctx->queue[i] = (*priv->reta_idx)[i];
@@ -9058,6 +9236,9 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
                err_msg = "unsupported tunnel type";
                goto out;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
+       case RTE_FLOW_ITEM_TYPE_GRE:
+       case RTE_FLOW_ITEM_TYPE_NVGRE:
+       case RTE_FLOW_ITEM_TYPE_GENEVE:
                break;
        }
 
@@ -9210,7 +9391,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 {
        uint64_t ol_flags = m->ol_flags;
        const struct mlx5_flow_tbl_data_entry *tble;
-       const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+       const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
 
        if (!is_tunnel_offload_active(dev)) {
                info->flags = 0;
@@ -9351,3 +9532,41 @@ mlx5_dbg__print_pattern(const struct rte_flow_item *item)
        }
        printf("END\n");
 }
+
+static int
+mlx5_flow_is_std_vxlan_port(const struct rte_flow_item *udp_item)
+{
+       const struct rte_flow_item_udp *spec = udp_item->spec;
+       const struct rte_flow_item_udp *mask = udp_item->mask;
+       uint16_t udp_dport = 0;
+
+       if (spec != NULL) {
+               if (!mask)
+                       mask = &rte_flow_item_udp_mask;
+               udp_dport = rte_be_to_cpu_16(spec->hdr.dst_port &
+                               mask->hdr.dst_port);
+       }
+       return (!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN);
+}
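A host-byte-order model of the check above (the driver applies the mask and byte-swaps with rte_be_to_cpu_16; 4789 is the IANA-assigned VXLAN port, MLX5_UDP_PORT_VXLAN in the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UDP_PORT_VXLAN 4789

static bool
is_std_vxlan_port(const uint16_t *dport_spec, uint16_t dport_mask)
{
        uint16_t dport = dport_spec ? (*dport_spec & dport_mask) : 0;

        return dport == 0 || dport == UDP_PORT_VXLAN;
}

int main(void)
{
        uint16_t dport = UDP_PORT_VXLAN;

        /* No spec at all and an exact 4789 both count as standard. */
        printf("%d %d\n", is_std_vxlan_port(NULL, 0xffff),
               is_std_vxlan_port(&dport, 0xffff));
        return 0;
}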
+
+static const struct mlx5_flow_expand_node *
+mlx5_flow_expand_rss_adjust_node(const struct rte_flow_item *pattern,
+               unsigned int item_idx,
+               const struct mlx5_flow_expand_node graph[],
+               const struct mlx5_flow_expand_node *node)
+{
+       const struct rte_flow_item *item = pattern + item_idx, *prev_item;
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_VXLAN:
+               MLX5_ASSERT(item_idx > 0);
+               prev_item = pattern + item_idx - 1;
+               MLX5_ASSERT(prev_item->type == RTE_FLOW_ITEM_TYPE_UDP);
+               if (mlx5_flow_is_std_vxlan_port(prev_item))
+                       return &graph[MLX5_EXPANSION_STD_VXLAN];
+               else
+                       return &graph[MLX5_EXPANSION_L3_VXLAN];
+               break;
+       default:
+               return node;
+       }
+}