net/mlx5: add flow translate function
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 513f70d..6c30466 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3,7 +3,9 @@
  * Copyright 2016 Mellanox Technologies, Ltd
  */
 
+#include <netinet/in.h>
 #include <sys/queue.h>
+#include <stdalign.h>
 #include <stdint.h>
 #include <string.h>
 
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 #include "mlx5_glue.h"
+#include "mlx5_flow.h"
 
 /* Dev ops structure defined in mlx5.c */
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
-/* Pattern Layer bits. */
-#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
-#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
-#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
-#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-/* Masks. */
-#define MLX5_FLOW_LAYER_OUTER_L3 \
-       (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
-#define MLX5_FLOW_LAYER_OUTER_L4 \
-       (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
-
-/* Actions that modify the fate of matching traffic. */
-#define MLX5_FLOW_FATE_DROP (1u << 0)
-#define MLX5_FLOW_FATE_QUEUE (1u << 1)
-
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
-       unsigned int size; /**< Size of the attribute. */
-       struct {
-               struct ibv_flow_attr *attr;
-               /**< Pointer to the Specification buffer. */
-               uint8_t *specs; /**< Pointer to the specifications. */
-       };
-       struct ibv_flow *flow; /**< Verbs flow pointer. */
-       struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+enum mlx5_expansion {
+       MLX5_EXPANSION_ROOT,
+       MLX5_EXPANSION_ROOT_OUTER,
+       MLX5_EXPANSION_ROOT_ETH_VLAN,
+       MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
+       MLX5_EXPANSION_OUTER_ETH,
+       MLX5_EXPANSION_OUTER_ETH_VLAN,
+       MLX5_EXPANSION_OUTER_VLAN,
+       MLX5_EXPANSION_OUTER_IPV4,
+       MLX5_EXPANSION_OUTER_IPV4_UDP,
+       MLX5_EXPANSION_OUTER_IPV4_TCP,
+       MLX5_EXPANSION_OUTER_IPV6,
+       MLX5_EXPANSION_OUTER_IPV6_UDP,
+       MLX5_EXPANSION_OUTER_IPV6_TCP,
+       MLX5_EXPANSION_VXLAN,
+       MLX5_EXPANSION_VXLAN_GPE,
+       MLX5_EXPANSION_GRE,
+       MLX5_EXPANSION_MPLS,
+       MLX5_EXPANSION_ETH,
+       MLX5_EXPANSION_ETH_VLAN,
+       MLX5_EXPANSION_VLAN,
+       MLX5_EXPANSION_IPV4,
+       MLX5_EXPANSION_IPV4_UDP,
+       MLX5_EXPANSION_IPV4_TCP,
+       MLX5_EXPANSION_IPV6,
+       MLX5_EXPANSION_IPV6_UDP,
+       MLX5_EXPANSION_IPV6_TCP,
 };
 
-/* Flow structure. */
-struct rte_flow {
-       TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
-       struct rte_flow_attr attributes; /**< User flow attribute. */
-       uint32_t layers;
-       /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
-       uint32_t fate;
-       /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
-       struct mlx5_flow_verbs verbs; /* Verbs flow. */
-       uint16_t queue; /**< Destination queue to redirect traffic to. */
+/** Supported expansion of items. */
+static const struct rte_flow_expand_node mlx5_support_expansion[] = {
+       [MLX5_EXPANSION_ROOT] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                MLX5_EXPANSION_IPV4,
+                                                MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_END,
+       },
+       [MLX5_EXPANSION_ROOT_OUTER] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+                                                MLX5_EXPANSION_OUTER_IPV4,
+                                                MLX5_EXPANSION_OUTER_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_END,
+       },
+       [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+               .type = RTE_FLOW_ITEM_TYPE_END,
+       },
+       [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+               .type = RTE_FLOW_ITEM_TYPE_END,
+       },
+       [MLX5_EXPANSION_OUTER_ETH] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+                                                MLX5_EXPANSION_OUTER_IPV6,
+                                                MLX5_EXPANSION_MPLS),
+               .type = RTE_FLOW_ITEM_TYPE_ETH,
+               .rss_types = 0,
+       },
+       [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+               .type = RTE_FLOW_ITEM_TYPE_ETH,
+               .rss_types = 0,
+       },
+       [MLX5_EXPANSION_OUTER_VLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+                                                MLX5_EXPANSION_OUTER_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_VLAN,
+       },
+       [MLX5_EXPANSION_OUTER_IPV4] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT
+                       (MLX5_EXPANSION_OUTER_IPV4_UDP,
+                        MLX5_EXPANSION_OUTER_IPV4_TCP,
+                        MLX5_EXPANSION_GRE),
+               .type = RTE_FLOW_ITEM_TYPE_IPV4,
+               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+                       ETH_RSS_NONFRAG_IPV4_OTHER,
+       },
+       [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+                                                MLX5_EXPANSION_VXLAN_GPE),
+               .type = RTE_FLOW_ITEM_TYPE_UDP,
+               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+       },
+       [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+               .type = RTE_FLOW_ITEM_TYPE_TCP,
+               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+       },
+       [MLX5_EXPANSION_OUTER_IPV6] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT
+                       (MLX5_EXPANSION_OUTER_IPV6_UDP,
+                        MLX5_EXPANSION_OUTER_IPV6_TCP),
+               .type = RTE_FLOW_ITEM_TYPE_IPV6,
+               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+                       ETH_RSS_NONFRAG_IPV6_OTHER,
+       },
+       [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+                                                MLX5_EXPANSION_VXLAN_GPE),
+               .type = RTE_FLOW_ITEM_TYPE_UDP,
+               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+       },
+       [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+               .type = RTE_FLOW_ITEM_TYPE_TCP,
+               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+       },
+       [MLX5_EXPANSION_VXLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+       },
+       [MLX5_EXPANSION_VXLAN_GPE] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+                                                MLX5_EXPANSION_IPV4,
+                                                MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       },
+       [MLX5_EXPANSION_GRE] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+               .type = RTE_FLOW_ITEM_TYPE_GRE,
+       },
+       [MLX5_EXPANSION_MPLS] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_MPLS,
+       },
+       [MLX5_EXPANSION_ETH] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_ETH,
+       },
+       [MLX5_EXPANSION_ETH_VLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+               .type = RTE_FLOW_ITEM_TYPE_ETH,
+       },
+       [MLX5_EXPANSION_VLAN] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+                                                MLX5_EXPANSION_IPV6),
+               .type = RTE_FLOW_ITEM_TYPE_VLAN,
+       },
+       [MLX5_EXPANSION_IPV4] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
+                                                MLX5_EXPANSION_IPV4_TCP),
+               .type = RTE_FLOW_ITEM_TYPE_IPV4,
+               .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+                       ETH_RSS_NONFRAG_IPV4_OTHER,
+       },
+       [MLX5_EXPANSION_IPV4_UDP] = {
+               .type = RTE_FLOW_ITEM_TYPE_UDP,
+               .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+       },
+       [MLX5_EXPANSION_IPV4_TCP] = {
+               .type = RTE_FLOW_ITEM_TYPE_TCP,
+               .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+       },
+       [MLX5_EXPANSION_IPV6] = {
+               .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
+                                                MLX5_EXPANSION_IPV6_TCP),
+               .type = RTE_FLOW_ITEM_TYPE_IPV6,
+               .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+                       ETH_RSS_NONFRAG_IPV6_OTHER,
+       },
+       [MLX5_EXPANSION_IPV6_UDP] = {
+               .type = RTE_FLOW_ITEM_TYPE_UDP,
+               .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+       },
+       [MLX5_EXPANSION_IPV6_TCP] = {
+               .type = RTE_FLOW_ITEM_TYPE_TCP,
+               .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
+       },
 };
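
Editor's aside, not part of the patch: the table above is meant to be walked by the generic RSS expansion helper when a flow carrying an RSS action is created. The comment block below is a hedged illustration (hypothetical input pattern, assumed behaviour) of the fan-out the MLX5_EXPANSION_ROOT sub-graph produces.

/*
 * Editor's illustration (assumed behaviour, hypothetical pattern):
 * starting from MLX5_EXPANSION_ROOT, a pattern such as
 *
 *     eth / ipv4 / end
 *
 * requested with ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV4_TCP is
 * expected to expand along the graph edges into one flow per branch whose
 * .rss_types intersect the requested types, e.g.:
 *
 *     eth / ipv4
 *     eth / ipv4 / udp
 *     eth / ipv4 / tcp
 */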
 
 static const struct rte_flow_ops mlx5_flow_ops = {
@@ -82,6 +214,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .isolate = mlx5_flow_isolate,
+       .query = mlx5_flow_query,
 };
 
 /* Convert FDIR request to Generic flow. */
@@ -110,22 +243,59 @@ struct mlx5_fdir {
        struct rte_flow_action_queue queue;
 };
 
-/* Verbs specification header. */
-struct ibv_spec_header {
-       enum ibv_flow_spec_type type;
-       uint16_t size;
+/* Map of flow and sub-priority to Verbs priority, with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
 };
 
- /**
-  * Discover the maximum number of priority available.
-  *
-  * @param[in] dev
-  *   Pointer to Ethernet device.
-  *
-  * @return
-  *   number of supported flow priority on success, a negative errno value
-  *   otherwise and rte_errno is set.
-  */
+/* Map of flow and sub-priority to Verbs priority, with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+       { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+       { 9, 10, 11 }, { 12, 13, 14 },
+};
+
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+       uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+       uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
+
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+       {
+               .tunnel = MLX5_FLOW_LAYER_VXLAN,
+               .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+       },
+       {
+               .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+               .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+       },
+       {
+               .tunnel = MLX5_FLOW_LAYER_GRE,
+               .ptype = RTE_PTYPE_TUNNEL_GRE,
+       },
+       {
+               .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
+               .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+       },
+       {
+               .tunnel = MLX5_FLOW_LAYER_MPLS,
+               .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+       },
+};
+
+/* Holds the nic operations that should be used. */
+struct mlx5_flow_driver_ops nic_ops;
+
+/**
+ * Discover the maximum number of priority available.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   number of supported flow priorities on success, a negative errno
+ *   value otherwise and rte_errno is set.
+ */
 int
 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 {
@@ -150,6 +320,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
        struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
        uint16_t vprio[] = { 8, 16 };
        int i;
+       int priority = 0;
 
        if (!drop) {
                rte_errno = ENOTSUP;
@@ -161,65 +332,56 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
                if (!flow)
                        break;
                claim_zero(mlx5_glue->destroy_flow(flow));
+               priority = vprio[i];
+       }
+       switch (priority) {
+       case 8:
+               priority = RTE_DIM(priority_map_3);
+               break;
+       case 16:
+               priority = RTE_DIM(priority_map_5);
+               break;
+       default:
+               rte_errno = ENOTSUP;
+               DRV_LOG(ERR,
+                       "port %u verbs maximum priority: %d expected 8/16",
+                       dev->data->port_id, priority);
+               return -rte_errno;
        }
        mlx5_hrxq_drop_release(dev);
        DRV_LOG(INFO, "port %u flow maximum priority: %d",
-               dev->data->port_id, vprio[i - 1]);
-       return vprio[i - 1];
+               dev->data->port_id, priority);
+       return priority;
 }
 
 /**
- * Verify the @p attributes will be correctly understood by the NIC and store
- * them in the @p flow if everything is correct.
+ * Adjust flow priority based on the highest layer and the request priority.
  *
  * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] attributes
- *   Pointer to flow attributes
- * @param[in, out] flow
- *   Pointer to the rte_flow structure.
- * @param[out] error
- *   Pointer to error structure.
+ *   Pointer to the Ethernet device structure.
+ * @param[in] priority
+ *   The rule base priority.
+ * @param[in] subpriority
+ *   The priority based on the items.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   The new priority.
  */
-static int
-mlx5_flow_attributes(struct rte_eth_dev *dev,
-                    const struct rte_flow_attr *attributes,
-                    struct rte_flow *flow,
-                    struct rte_flow_error *error)
+uint32_t
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+                          uint32_t subpriority)
 {
-       uint32_t priority_max =
-               ((struct priv *)dev->data->dev_private)->config.flow_prio;
+       uint32_t res = 0;
+       struct priv *priv = dev->data->dev_private;
 
-       if (attributes->group)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-                                         NULL,
-                                         "groups is not supported");
-       if (attributes->priority >= priority_max)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-                                         NULL,
-                                         "priority out of range");
-       if (attributes->egress)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
-                                         NULL,
-                                         "egress is not supported");
-       if (attributes->transfer)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
-                                         NULL,
-                                         "transfer is not supported");
-       if (!attributes->ingress)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
-                                         NULL,
-                                         "ingress attribute is mandatory");
-       flow->attributes = *attributes;
-       return 0;
+       switch (priv->config.flow_prio) {
+       case RTE_DIM(priority_map_3):
+               res = priority_map_3[priority][subpriority];
+               break;
+       case RTE_DIM(priority_map_5):
+               res = priority_map_5[priority][subpriority];
+               break;
+       }
+       return res;
 }
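
A minimal standalone sketch of the lookup mlx5_flow_adjust_priority() performs, assuming MLX5_PRIORITY_MAP_MAX is 3 (sub-priorities 0..2): with 16 Verbs priorities, a rule of base priority 1 and sub-priority 2 lands on Verbs priority 5.

#include <stdint.h>
#include <stdio.h>

/* Local copy of priority_map_5 above (16 Verbs priorities case). */
static const uint32_t map5[][3] = {
        { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, { 9, 10, 11 }, { 12, 13, 14 },
};

int
main(void)
{
        uint32_t priority = 1;    /* rule base priority from the attributes */
        uint32_t subpriority = 2; /* sub-priority derived from the items */

        /* Same indexing as mlx5_flow_adjust_priority(). */
        printf("verbs priority = %u\n", map5[priority][subpriority]); /* 5 */
        return 0;
}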
 
 /**
@@ -259,8 +421,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                                                  " bits");
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
        if (item->spec && item->last) {
@@ -284,162 +445,508 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 }
 
 /**
- * Add a verbs specification into @p flow.
+ * Look up and set the ptype in the Rx queue data. Only a single tunnel ptype
+ * can be reported; if several tunnel types are in use on the queue, the
+ * tunnel ptype is cleared.
  *
- * @param[in, out] flow
+ * @param rxq_ctrl
+ *   Rx queue to update.
+ */
+static void
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       unsigned int i;
+       uint32_t tunnel_ptype = 0;
+
+       /* Look up the ptype to use. */
+       for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+               if (!rxq_ctrl->flow_tunnels_n[i])
+                       continue;
+               if (!tunnel_ptype) {
+                       tunnel_ptype = tunnels_info[i].ptype;
+               } else {
+                       tunnel_ptype = 0;
+                       break;
+               }
+       }
+       rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] flow
  *   Pointer to flow structure.
- * @param[in] src
- *   Create specification.
- * @param[in] size
- *   Size in bytes of the specification to copy.
  */
 static void
-mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-       if (flow->verbs.specs) {
-               void *dst;
+       struct priv *priv = dev->data->dev_private;
+       const int mark = !!(flow->actions &
+                           (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+       const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       unsigned int i;
+
+       for (i = 0; i != flow->rss.queue_num; ++i) {
+               int idx = (*flow->queue)[i];
+               struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of((*priv->rxqs)[idx],
+                                    struct mlx5_rxq_ctrl, rxq);
+
+               if (mark) {
+                       rxq_ctrl->rxq.mark = 1;
+                       rxq_ctrl->flow_mark_n++;
+               }
+               if (tunnel) {
+                       unsigned int j;
+
+                       /* Increase the counter matching the flow. */
+                       for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+                               if ((tunnels_info[j].tunnel & flow->layers) ==
+                                   tunnels_info[j].tunnel) {
+                                       rxq_ctrl->flow_tunnels_n[j]++;
+                                       break;
+                               }
+                       }
+                       mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+               }
+       }
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Pointer to the flow.
+ */
+static void
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+       struct priv *priv = dev->data->dev_private;
+       const int mark = !!(flow->actions &
+                           (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+       const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+       unsigned int i;
 
-               dst = (void *)(flow->verbs.specs + flow->verbs.size);
-               memcpy(dst, src, size);
-               ++flow->verbs.attr->num_of_specs;
+       assert(dev->data->dev_started);
+       for (i = 0; i != flow->rss.queue_num; ++i) {
+               int idx = (*flow->queue)[i];
+               struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of((*priv->rxqs)[idx],
+                                    struct mlx5_rxq_ctrl, rxq);
+
+               if (mark) {
+                       rxq_ctrl->flow_mark_n--;
+                       rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+               }
+               if (tunnel) {
+                       unsigned int j;
+
+                       /* Decrease the counter matching the flow. */
+                       for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+                               if ((tunnels_info[j].tunnel & flow->layers) ==
+                                   tunnels_info[j].tunnel) {
+                                       rxq_ctrl->flow_tunnels_n[j]--;
+                                       break;
+                               }
+                       }
+                       mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+               }
        }
-       flow->verbs.size += size;
 }
 
 /**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       unsigned int i;
+
+       for (i = 0; i != priv->rxqs_n; ++i) {
+               struct mlx5_rxq_ctrl *rxq_ctrl;
+               unsigned int j;
+
+               if (!(*priv->rxqs)[i])
+                       continue;
+               rxq_ctrl = container_of((*priv->rxqs)[i],
+                                       struct mlx5_rxq_ctrl, rxq);
+               rxq_ctrl->flow_mark_n = 0;
+               rxq_ctrl->rxq.mark = 0;
+               for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+                       rxq_ctrl->flow_tunnels_n[j] = 0;
+               rxq_ctrl->rxq.tunnel = 0;
+       }
+}
+
+/**
+ * Validate the flag action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_flag(uint64_t action_flags,
+                              struct rte_flow_error *error)
+{
+       if (action_flags & MLX5_FLOW_ACTION_DROP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't drop and flag in same flow");
+       if (action_flags & MLX5_FLOW_ACTION_MARK)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't mark and flag in same flow");
+       if (action_flags & MLX5_FLOW_ACTION_FLAG)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't have 2 flag"
+                                         " actions in same flow");
+       return 0;
+}
+
+/**
+ * Validate the mark action.
+ *
+ * @param[in] action
+ *   Pointer to the mark action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+                              uint64_t action_flags,
+                              struct rte_flow_error *error)
+{
+       const struct rte_flow_action_mark *mark = action->conf;
+
+       if (!mark)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         action,
+                                         "configuration cannot be null");
+       if (mark->id >= MLX5_FLOW_MARK_MAX)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &mark->id,
+                                         "mark id must be in 0 <= id < "
+                                         RTE_STR(MLX5_FLOW_MARK_MAX));
+       if (action_flags & MLX5_FLOW_ACTION_DROP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't drop and mark in same flow");
+       if (action_flags & MLX5_FLOW_ACTION_FLAG)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't flag and mark in same flow");
+       if (action_flags & MLX5_FLOW_ACTION_MARK)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't have 2 mark actions in same"
+                                         " flow");
+       return 0;
+}
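
For illustration only (not from the patch): a MARK action configuration that these checks accept, assuming the id stays below MLX5_FLOW_MARK_MAX and no DROP, FLAG or second MARK action is present in the same flow.

#include <rte_flow.h>

static const struct rte_flow_action_mark mark_conf = {
        .id = 42, /* must satisfy 0 <= id < MLX5_FLOW_MARK_MAX */
};
static const struct rte_flow_action mark_action = {
        .type = RTE_FLOW_ACTION_TYPE_MARK,
        .conf = &mark_conf,
};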
+
+/**
+ * Validate the drop action.
+ *
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_drop(uint64_t action_flags,
+                              struct rte_flow_error *error)
+{
+       if (action_flags & MLX5_FLOW_ACTION_FLAG)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't drop and flag in same flow");
+       if (action_flags & MLX5_FLOW_ACTION_MARK)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't drop and mark in same flow");
+       if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't have 2 fate actions in"
+                                         " same flow");
+       return 0;
+}
+
+/**
+ * Validate the queue action.
+ *
+ * @param[in] action
+ *   Pointer to the queue action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
+                               uint64_t action_flags,
+                               struct rte_eth_dev *dev,
+                               struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+       const struct rte_flow_action_queue *queue = action->conf;
+
+       if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't have 2 fate actions in"
+                                         " same flow");
+       if (queue->index >= priv->rxqs_n)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &queue->index,
+                                         "queue index out of range");
+       if (!(*priv->rxqs)[queue->index])
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &queue->index,
+                                         "queue is not configured");
+       return 0;
+}
+
+/**
+ * Validate the RSS action.
+ *
+ * @param[in] action
+ *   Pointer to the RSS action.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+                             uint64_t action_flags,
+                             struct rte_eth_dev *dev,
+                             struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+       const struct rte_flow_action_rss *rss = action->conf;
+       unsigned int i;
+
+       if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "can't have 2 fate actions"
+                                         " in same flow");
+       if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+           rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->func,
+                                         "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       if (rss->level > 2)
+#else
+       if (rss->level > 1)
+#endif
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->level,
+                                         "tunnel RSS is not supported");
+       if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->key_len,
+                                         "RSS hash key too small");
+       if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->key_len,
+                                         "RSS hash key too large");
+       if (rss->queue_num > priv->config.ind_table_max_size)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->queue_num,
+                                         "number of queues too large");
+       if (rss->types & MLX5_RSS_HF_MASK)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->types,
+                                         "some RSS protocols are not"
+                                         " supported");
+       for (i = 0; i != rss->queue_num; ++i) {
+               if (!(*priv->rxqs)[rss->queue[i]])
+                       return rte_flow_error_set
+                               (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                &rss->queue[i], "queue is not configured");
+       }
+       return 0;
+}
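
A hedged sketch of an RSS action that satisfies the checks above; it assumes MLX5_RSS_HASH_KEY_LEN is 40 bytes and that Rx queues 0 and 1 are configured. The key contents and queue ids are placeholders.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint8_t rss_key[40] = { 0 };      /* assumed MLX5_RSS_HASH_KEY_LEN */
static const uint16_t rss_queues[] = { 0, 1 }; /* must be configured Rx queues */

static const struct rte_flow_action_rss rss_conf = {
        .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
        .level = 1, /* outer headers only, no tunnel RSS */
        .types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |
                 ETH_RSS_NONFRAG_IPV4_TCP,
        .key_len = sizeof(rss_key),
        .key = rss_key,
        .queue_num = RTE_DIM(rss_queues),
        .queue = rss_queues,
};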
+
+/**
+ * Validate the count action.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
+                               struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+
+       if (!priv->config.flow_counter_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "flow counters are not supported.");
+       return 0;
+}
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ *   Pointer to flow attributes
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
+                             const struct rte_flow_attr *attributes,
+                             struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+       uint32_t priority_max = priv->config.flow_prio - 1;
+
+       if (attributes->group)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+                                         NULL, "groups are not supported");
+       if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+           attributes->priority >= priority_max)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                         NULL, "priority out of range");
+       if (attributes->egress)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+                                         "egress is not supported");
+       if (attributes->transfer)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                         NULL, "transfer is not supported");
+       if (!attributes->ingress)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+                                         NULL,
+                                         "ingress attribute is mandatory");
+       return 0;
+}
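
For reference, a sketch of the only attribute shape these checks accept: an ingress rule in group 0 with an in-range priority (or the reserved MLX5_FLOW_PRIO_RSVD value).

#include <rte_flow.h>

static const struct rte_flow_attr flow_attr = {
        .group = 0,    /* non-zero groups are rejected */
        .priority = 0, /* must be < flow_prio - 1, or MLX5_FLOW_PRIO_RSVD */
        .ingress = 1,  /* mandatory */
        /* .egress and .transfer must remain 0. */
};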
+
+/**
+ * Validate Ethernet item.
  *
  * @param[in] item
  *   Item specification.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small, nothing is
- *   written.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @p item has fully been converted,
- *   otherwise another call with this returned memory size should be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
-                  const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           struct rte_flow_error *error)
 {
-       const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
-       const unsigned int size = sizeof(struct ibv_flow_spec_eth);
-       struct ibv_flow_spec_eth eth = {
-               .type = IBV_FLOW_SPEC_ETH,
-               .size = size,
-       };
        int ret;
+       int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
-       if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
+       if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "3 levels of L2 are not supported");
+       if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
-                                         "L2 layers already configured");
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "2 L2 layers without tunnel are not supported");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_eth),
                                        error);
-       if (ret)
-               return ret;
-       flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
-       if (size > flow_size)
-               return size;
-       if (spec) {
-               unsigned int i;
-
-               memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
-               memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
-               eth.val.ether_type = spec->type;
-               memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
-               memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
-               eth.mask.ether_type = mask->type;
-               /* Remove unwanted bits from values. */
-               for (i = 0; i < ETHER_ADDR_LEN; ++i) {
-                       eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
-                       eth.val.src_mac[i] &= eth.mask.src_mac[i];
-               }
-               eth.val.ether_type &= eth.mask.ether_type;
-       }
-       mlx5_flow_spec_verbs_add(flow, &eth, size);
-       return size;
-}
-
-/**
- * Update the VLAN tag in the Verbs Ethernet specification.
- *
- * @param[in, out] attr
- *   Pointer to Verbs attributes structure.
- * @param[in] eth
- *   Verbs structure containing the VLAN information to copy.
- */
-static void
-mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
-                          struct ibv_flow_spec_eth *eth)
-{
-       unsigned int i;
-       enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
-       struct ibv_spec_header *hdr = (struct ibv_spec_header *)
-               ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
-       for (i = 0; i != attr->num_of_specs; ++i) {
-               if (hdr->type == search) {
-                       struct ibv_flow_spec_eth *e =
-                               (struct ibv_flow_spec_eth *)hdr;
-
-                       e->val.vlan_tag = eth->val.vlan_tag;
-                       e->mask.vlan_tag = eth->mask.vlan_tag;
-                       e->val.ether_type = eth->val.ether_type;
-                       e->mask.ether_type = eth->mask.ether_type;
-                       break;
-               }
-               hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
-       }
+       return ret;
 }
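
A small Ethernet item sketch that stays within the nic_mask validated above; the destination MAC is an arbitrary placeholder.

#include <rte_flow.h>

static const struct rte_flow_item_eth eth_spec = {
        .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55", /* placeholder MAC */
};
static const struct rte_flow_item_eth eth_mask = {
        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", /* match destination only */
};
static const struct rte_flow_item eth_item = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &eth_spec,
        .mask = &eth_mask,
};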
 
 /**
- * Convert the @p item into @p flow (or by updating the already present
- * Ethernet Verbs) specification after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VLAN item.
  *
  * @param[in] item
  *   Item specification.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small, nothing is
- *   written.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @p item has fully been converted,
- *   otherwise another call with this returned memory size should be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
-                   const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
+                            int64_t item_flags,
+                            struct rte_flow_error *error)
 {
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
@@ -447,92 +954,66 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
-       unsigned int size = sizeof(struct ibv_flow_spec_eth);
-       struct ibv_flow_spec_eth eth = {
-               .type = IBV_FLOW_SPEC_ETH,
-               .size = size,
-       };
+       uint16_t vlan_tag = 0;
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        int ret;
-       const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
-                       MLX5_FLOW_LAYER_OUTER_L4;
-       const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
-       const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
-
-       if (flow->layers & vlanm)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+       const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+                                       MLX5_FLOW_LAYER_INNER_L4) :
+                                      (MLX5_FLOW_LAYER_OUTER_L3 |
+                                       MLX5_FLOW_LAYER_OUTER_L4);
+       const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+                                       MLX5_FLOW_LAYER_OUTER_VLAN;
+
+       if (item_flags & vlanm)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "VLAN layer already configured");
-       else if ((flow->layers & l34m) != 0)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+       else if ((item_flags & l34m) != 0)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L2 layer cannot follow L3/L4 layer");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
-       ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
-                (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_vlan), error);
+       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+                                       (const uint8_t *)&nic_mask,
+                                       sizeof(struct rte_flow_item_vlan),
+                                       error);
        if (ret)
                return ret;
        if (spec) {
-               eth.val.vlan_tag = spec->tci;
-               eth.mask.vlan_tag = mask->tci;
-               eth.val.vlan_tag &= eth.mask.vlan_tag;
-               eth.val.ether_type = spec->inner_type;
-               eth.mask.ether_type = mask->inner_type;
-               eth.val.ether_type &= eth.mask.ether_type;
+               vlan_tag = spec->tci;
+               vlan_tag &= mask->tci;
        }
        /*
         * From verbs perspective an empty VLAN is equivalent
         * to a packet without VLAN layer.
         */
-       if (!eth.mask.vlan_tag)
+       if (!vlan_tag)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "VLAN cannot be empty");
-       if (!(flow->layers & l2m)) {
-               if (size <= flow_size)
-                       mlx5_flow_spec_verbs_add(flow, &eth, size);
-       } else {
-               if (flow->verbs.attr)
-                       mlx5_flow_item_vlan_update(flow->verbs.attr, &eth);
-               size = 0; /* Only an update is done in eth specification. */
-       }
-       flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 |
-               MLX5_FLOW_LAYER_OUTER_VLAN;
-       return size;
+       return 0;
 }
 
 /**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate IPV4 item.
  *
  * @param[in] item
  *   Item specification.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small, nothing is
- *   written.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @p item has fully been converted,
- *   otherwise another call with this returned memory size should be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
-                   const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+                            int64_t item_flags,
+                            struct rte_flow_error *error)
 {
-       const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
@@ -542,84 +1023,48 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                        .next_proto_id = 0xff,
                },
        };
-       unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
-       struct ibv_flow_spec_ipv4_ext ipv4 = {
-               .type = IBV_FLOW_SPEC_IPV4_EXT,
-               .size = size,
-       };
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        int ret;
 
-       if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+       if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                  MLX5_FLOW_LAYER_OUTER_L3))
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L3 layers not supported");
-       else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+       else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                       MLX5_FLOW_LAYER_OUTER_L4))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an L4 layer.");
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
-       ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
-                (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_ipv4), error);
+       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+                                       (const uint8_t *)&nic_mask,
+                                       sizeof(struct rte_flow_item_ipv4),
+                                       error);
        if (ret < 0)
                return ret;
-       flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-       if (size > flow_size)
-               return size;
-       if (spec) {
-               ipv4.val = (struct ibv_flow_ipv4_ext_filter){
-                       .src_ip = spec->hdr.src_addr,
-                       .dst_ip = spec->hdr.dst_addr,
-                       .proto = spec->hdr.next_proto_id,
-                       .tos = spec->hdr.type_of_service,
-               };
-               ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
-                       .src_ip = mask->hdr.src_addr,
-                       .dst_ip = mask->hdr.dst_addr,
-                       .proto = mask->hdr.next_proto_id,
-                       .tos = mask->hdr.type_of_service,
-               };
-               /* Remove unwanted bits from values. */
-               ipv4.val.src_ip &= ipv4.mask.src_ip;
-               ipv4.val.dst_ip &= ipv4.mask.dst_ip;
-               ipv4.val.proto &= ipv4.mask.proto;
-               ipv4.val.tos &= ipv4.mask.tos;
-       }
-       mlx5_flow_spec_verbs_add(flow, &ipv4, size);
-       return size;
+       return 0;
 }
 
 /**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate IPV6 item.
  *
  * @param[in] item
  *   Item specification.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small, nothing is
- *   written.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @p item has fully been converted,
- *   otherwise another call with this returned memory size should be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
-                   const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+                            uint64_t item_flags,
+                            struct rte_flow_error *error)
 {
-       const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
@@ -634,376 +1079,418 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                        .hop_limits = 0xff,
                },
        };
-       unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
-       struct ibv_flow_spec_ipv6 ipv6 = {
-               .type = IBV_FLOW_SPEC_IPV6,
-               .size = size,
-       };
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        int ret;
 
-       if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
+       if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                  MLX5_FLOW_LAYER_OUTER_L3))
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L3 layers not supported");
-       else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM,
-                                         item,
+       else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                       MLX5_FLOW_LAYER_OUTER_L4))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an L4 layer.");
+       /*
+        * IPv6 is not recognised by the NIC inside a GRE tunnel.
+        * Such support has to be disabled here, otherwise the rule would be
+        * accepted even though the NIC cannot match it.  Issue reproduced
+        * with Mellanox OFED 4.3-3.0.2.1 and
+        * Mellanox OFED 4.4-1.0.0.0.
+        */
+       if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "IPv6 inside a GRE tunnel is"
+                                         " not recognised.");
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
-       ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
-                (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_ipv6), error);
+       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+                                       (const uint8_t *)&nic_mask,
+                                       sizeof(struct rte_flow_item_ipv6),
+                                       error);
        if (ret < 0)
                return ret;
-       flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
-       if (size > flow_size)
-               return size;
-       if (spec) {
-               unsigned int i;
-               uint32_t vtc_flow_val;
-               uint32_t vtc_flow_mask;
-
-               memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
-                      RTE_DIM(ipv6.val.src_ip));
-               memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
-                      RTE_DIM(ipv6.val.dst_ip));
-               memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
-                      RTE_DIM(ipv6.mask.src_ip));
-               memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
-                      RTE_DIM(ipv6.mask.dst_ip));
-               vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
-               vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
-               ipv6.val.flow_label =
-                       rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
-                                        IPV6_HDR_FL_SHIFT);
-               ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
-                                        IPV6_HDR_TC_SHIFT;
-               ipv6.val.next_hdr = spec->hdr.proto;
-               ipv6.val.hop_limit = spec->hdr.hop_limits;
-               ipv6.mask.flow_label =
-                       rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
-                                        IPV6_HDR_FL_SHIFT);
-               ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
-                                         IPV6_HDR_TC_SHIFT;
-               ipv6.mask.next_hdr = mask->hdr.proto;
-               ipv6.mask.hop_limit = mask->hdr.hop_limits;
-               /* Remove unwanted bits from values. */
-               for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
-                       ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
-                       ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
-               }
-               ipv6.val.flow_label &= ipv6.mask.flow_label;
-               ipv6.val.traffic_class &= ipv6.mask.traffic_class;
-               ipv6.val.next_hdr &= ipv6.mask.next_hdr;
-               ipv6.val.hop_limit &= ipv6.mask.hop_limit;
-       }
-       mlx5_flow_spec_verbs_add(flow, &ipv6, size);
-       return size;
+       return 0;
 }
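
For context, here is a minimal application-side sketch (not part of this patch) of a pattern that the IPv6-in-GRE check above rejects; the port id, queue index and bare items without specs are illustrative assumptions:

#include <rte_flow.h>

/* Probe whether an eth / ipv4 / gre / ipv6 pattern validates on a port. */
static int
probe_ipv6_over_gre(uint16_t port_id)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		/* Refused with ENOTSUP by the check above. */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}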
 
 /**
- * Convert the @p pattern into a Verbs specifications after ensuring the NIC
- * will understand and process it correctly.
- * The conversion is performed item per item, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p pattern, unless an error is encountered.
- *
- * @param[in] pattern
- *   Flow pattern.
- * @param[in, out] flow
- *   Pointer to the rte_flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small some
- *   garbage may be present.
+ * Validate UDP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @pattern  has fully been
- *   converted, otherwise another call with this returned memory size should
- *   be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_items(const struct rte_flow_item pattern[],
-               struct rte_flow *flow, const size_t flow_size,
-               struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error)
 {
-       int remain = flow_size;
-       size_t size = 0;
+       const struct rte_flow_item_udp *mask = item->mask;
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       int ret;
 
-       for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
-               int ret = 0;
+       if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with UDP layer");
+       if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                    MLX5_FLOW_LAYER_OUTER_L3)))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 is mandatory to filter on L4");
+       if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                  MLX5_FLOW_LAYER_OUTER_L4))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L4 layer is already present");
+       if (!mask)
+               mask = &rte_flow_item_udp_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_udp_mask,
+                sizeof(struct rte_flow_item_udp), error);
+       if (ret < 0)
+               return ret;
+       return 0;
+}
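
The target_protocol argument carries the next-protocol value announced by the preceding L3 item, with 0xff meaning unconstrained. Below is a hedged sketch of how a caller could derive it from an IPv4 item before validating the following UDP item; the helper name is hypothetical, and it assumes item_flags already carries the corresponding L3 bit and that the validator prototypes are exported through mlx5_flow.h as in this series:

#include <rte_flow.h>
#include "mlx5_flow.h"

/* Illustrative helper: feed the IPv4 next-protocol into the UDP validator. */
static int
check_ipv4_then_udp(const struct rte_flow_item *ipv4_item,
		    const struct rte_flow_item *udp_item,
		    uint64_t item_flags,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = ipv4_item->spec;
	const struct rte_flow_item_ipv4 *mask =
		ipv4_item->mask ? ipv4_item->mask : &rte_flow_item_ipv4_mask;
	uint8_t next_protocol = 0xff;

	if (spec && mask->hdr.next_proto_id)
		next_protocol = spec->hdr.next_proto_id &
				mask->hdr.next_proto_id;
	/* Fails with EINVAL when next_protocol is set but is not UDP. */
	return mlx5_flow_validate_item_udp(udp_item, item_flags,
					   next_protocol, error);
}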
 
-               switch (pattern->type) {
-               case RTE_FLOW_ITEM_TYPE_VOID:
-                       break;
-               case RTE_FLOW_ITEM_TYPE_ETH:
-                       ret = mlx5_flow_item_eth(pattern, flow, remain, error);
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VLAN:
-                       ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-                       ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
-                       break;
-               default:
-                       return rte_flow_error_set(error, ENOTSUP,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
-                                                 pattern,
-                                                 "item not supported");
-               }
-               if (ret < 0)
-                       return ret;
-               if (remain > ret)
-                       remain -= ret;
-               else
-                       remain = 0;
-               size += ret;
-       }
-       if (!flow->layers) {
-               const struct rte_flow_item item = {
-                       .type = RTE_FLOW_ITEM_TYPE_ETH,
-               };
+/**
+ * Validate TCP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error)
+{
+       const struct rte_flow_item_tcp *mask = item->mask;
+       const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+       int ret;
 
-               return mlx5_flow_item_eth(&item, flow, flow_size, error);
-       }
-       return size;
+       if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with TCP layer");
+       if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+                                    MLX5_FLOW_LAYER_OUTER_L3)))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 is mandatory to filter on L4");
+       if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+                                  MLX5_FLOW_LAYER_OUTER_L4))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L4 layer is already present");
+       if (!mask)
+               mask = &rte_flow_item_tcp_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_tcp_mask,
+                sizeof(struct rte_flow_item_tcp), error);
+       if (ret < 0)
+               return ret;
+       return 0;
 }
 
 /**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VXLAN item.
  *
- * @param[in] action
- *   Action configuration.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small, nothing is
- *   written.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected so far.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @p action has fully been
- *   converted, otherwise another call with this returned memory size should
- *   be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_action_drop(const struct rte_flow_action *action,
-                     struct rte_flow *flow, const size_t flow_size,
-                     struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+                             uint64_t item_flags,
+                             struct rte_flow_error *error)
 {
-       unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
-       struct ibv_flow_spec_action_drop drop = {
-                       .type = IBV_FLOW_SPEC_ACTION_DROP,
-                       .size = size,
-       };
+       const struct rte_flow_item_vxlan *spec = item->spec;
+       const struct rte_flow_item_vxlan *mask = item->mask;
+       int ret;
+       union vni {
+               uint32_t vlan_id;
+               uint8_t vni[4];
+       } id = { .vlan_id = 0, };
+       uint32_t vlan_id = 0;
+
-       if (flow->fate)
+       if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ACTION,
-                                         action,
-                                         "multiple fate actions are not"
-                                         " supported");
-       if (size < flow_size)
-               mlx5_flow_spec_verbs_add(flow, &drop, size);
-       flow->fate |= MLX5_FLOW_FATE_DROP;
-       return size;
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "a tunnel is already present");
+       /*
+        * Verify only UDPv4 is present as defined in
+        * https://tools.ietf.org/html/rfc7348
+        */
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no outer UDP layer found");
+       if (!mask)
+               mask = &rte_flow_item_vxlan_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_vxlan_mask,
+                sizeof(struct rte_flow_item_vxlan),
+                error);
+       if (ret < 0)
+               return ret;
+       if (spec) {
+               memcpy(&id.vni[1], spec->vni, 3);
+               vlan_id = id.vlan_id;
+               memcpy(&id.vni[1], mask->vni, 3);
+               vlan_id &= id.vlan_id;
+       }
+       /*
+        * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
+        * only this layer is present in the Verbs specification, it is
+        * interpreted as a wildcard and all packets will match this
+        * rule; if it follows a full stack layer (e.g. eth / ipv4 /
+        * udp), all packets matching the preceding layers will also
+        * match this rule.  To avoid such a situation, VNI 0 is
+        * currently refused.
+        */
+       if (!vlan_id)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "VXLAN vni cannot be 0");
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "VXLAN tunnel must be fully defined");
+       return 0;
 }
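
As the comment above explains, a zero VNI would act as a wildcard, so the VXLAN item must carry a non-zero VNI and sit behind a fully specified outer stack. A hypothetical application-side pattern that satisfies both constraints (the VNI value and the trailing inner Ethernet item are illustrative):

#include <string.h>
#include <rte_flow.h>

/* Build an eth / ipv4 / udp / vxlan (VNI 42) / eth pattern. */
static void
build_vxlan_pattern(struct rte_flow_item pattern[6],
		    struct rte_flow_item_vxlan *vxlan_spec)
{
	memset(vxlan_spec, 0, sizeof(*vxlan_spec));
	vxlan_spec->vni[2] = 42; /* 24-bit VNI in network byte order. */
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP };
	pattern[3] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.spec = vxlan_spec,
		.mask = &rte_flow_item_vxlan_mask,
	};
	pattern[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[5] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}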
 
 /**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
+ * Validate VXLAN_GPE item.
  *
- * @param[in] dev
- *   Pointer to Ethernet device structure.
- * @param[in] action
- *   Action configuration.
- * @param[in, out] flow
- *   Pointer to flow structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected so far.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_action_queue(struct rte_eth_dev *dev,
-                      const struct rte_flow_action *action,
-                      struct rte_flow *flow,
-                      struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
+                                 uint64_t item_flags,
+                                 struct rte_eth_dev *dev,
+                                 struct rte_flow_error *error)
 {
        struct priv *priv = dev->data->dev_private;
-       const struct rte_flow_action_queue *queue = action->conf;
+       const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+       const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+       int ret;
+       union vni {
+               uint32_t vlan_id;
+               uint8_t vni[4];
+       } id = { .vlan_id = 0, };
+       uint32_t vlan_id = 0;
 
-       if (flow->fate)
+       if (!priv->config.l3_vxlan_en)
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ACTION,
-                                         action,
-                                         "multiple fate actions are not"
-                                         " supported");
-       if (queue->index >= priv->rxqs_n)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                         &queue->index,
-                                         "queue index out of range");
-       if (!(*priv->rxqs)[queue->index])
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 VXLAN is not enabled by device"
+                                         " parameter and/or not configured in"
+                                         " firmware");
+       if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "a tunnel is already present");
+       /*
+        * Verify only UDPv4 is present, as for VXLAN in
+        * https://tools.ietf.org/html/rfc7348; VXLAN-GPE reuses the
+        * same outer UDP encapsulation.
+        */
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-                                         &queue->index,
-                                         "queue is not configured");
-       flow->queue = queue->index;
-       flow->fate |= MLX5_FLOW_FATE_QUEUE;
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "no outer UDP layer found");
+       if (!mask)
+               mask = &rte_flow_item_vxlan_gpe_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+                sizeof(struct rte_flow_item_vxlan_gpe),
+                error);
+       if (ret < 0)
+               return ret;
+       if (spec) {
+               if (spec->protocol)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "VXLAN-GPE protocol"
+                                                 " not supported");
+               memcpy(&id.vni[1], spec->vni, 3);
+               vlan_id = id.vlan_id;
+               memcpy(&id.vni[1], mask->vni, 3);
+               vlan_id &= id.vlan_id;
+       }
+       /*
+        * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
+        * layer is present in the Verbs specification, it is interpreted as a
+        * wildcard and all packets will match this rule; if it follows a full
+        * stack layer (e.g. eth / ipv4 / udp), all packets matching the
+        * preceding layers will also match this rule.  To avoid such a
+        * situation, VNI 0 is currently refused.
+        */
+       if (!vlan_id)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "VXLAN-GPE vni cannot be 0");
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "VXLAN-GPE tunnel must be fully"
+                                         " defined");
        return 0;
 }
 
 /**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- * The conversion is performed action per action, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p action, unless an error is encountered.
+ * Validate GRE item.
  *
- * @param[in] dev
- *   Pointer to Ethernet device structure.
- * @param[in] actions
- *   Pointer to flow actions array.
- * @param[in, out] flow
- *   Pointer to the rte_flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small some
- *   garbage may be present.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the @p actions has fully been
- *   converted, otherwise another call with this returned memory size should
- *   be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_actions(struct rte_eth_dev *dev,
-                 const struct rte_flow_action actions[],
-                 struct rte_flow *flow, const size_t flow_size,
-                 struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+                           uint64_t item_flags,
+                           uint8_t target_protocol,
+                           struct rte_flow_error *error)
 {
-       size_t size = 0;
-       int remain = flow_size;
-       int ret = 0;
+       const struct rte_flow_item_gre *spec __rte_unused = item->spec;
+       const struct rte_flow_item_gre *mask = item->mask;
+       int ret;
 
-       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
-               switch (actions->type) {
-               case RTE_FLOW_ACTION_TYPE_VOID:
-                       break;
-               case RTE_FLOW_ACTION_TYPE_DROP:
-                       ret = mlx5_flow_action_drop(actions, flow, remain,
-                                                   error);
-                       break;
-               case RTE_FLOW_ACTION_TYPE_QUEUE:
-                       ret = mlx5_flow_action_queue(dev, actions, flow, error);
-                       break;
-               default:
-                       return rte_flow_error_set(error, ENOTSUP,
-                                                 RTE_FLOW_ERROR_TYPE_ACTION,
-                                                 actions,
-                                                 "action not supported");
-               }
-               if (ret < 0)
-                       return ret;
-               if (remain > ret)
-                       remain -= ret;
-               else
-                       remain = 0;
-               size += ret;
-       }
-       if (!flow->fate)
+       if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with this GRE layer");
+       if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL,
-                                         "no fate action found");
-       return size;
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "a tunnel is already present");
+       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "L3 layer is missing");
+       if (!mask)
+               mask = &rte_flow_item_gre_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_gre_mask,
+                sizeof(struct rte_flow_item_gre), error);
+       if (ret < 0)
+               return ret;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+       if (spec && (spec->protocol & mask->protocol))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "without MPLS support the"
+                                         " specification cannot be used for"
+                                         " filtering");
+#endif
+       return 0;
 }
 
 /**
- * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
- * after ensuring the NIC will understand and process it correctly.
- * The conversion is only performed item/action per item/action, each of
- * them is written into the @p flow if its size is lesser or equal to @p
- * flow_size.
- * Validation and memory consumption computation are still performed until the
- * end, unless an error is encountered.
+ * Validate MPLS item.
  *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[in] flow_size
- *   Size in bytes of the available space in @p flow, if too small some
- *   garbage may be present.
- * @param[in] attributes
- *   Flow rule attributes.
- * @param[in] pattern
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected so far.
+ * @param[in] target_protocol
+ *   The next protocol in the previous item.
  * @param[out] error
- *   Perform verbose error reporting if not NULL.
+ *   Pointer to error structure.
  *
  * @return
- *   On success the number of bytes consumed/necessary, if the returned value
- *   is lesser or equal to @p flow_size, the flow has fully been converted and
- *   can be applied, otherwise another call with this returned memory size
- *   should be done.
- *   On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
-               const size_t flow_size,
-               const struct rte_flow_attr *attributes,
-               const struct rte_flow_item pattern[],
-               const struct rte_flow_action actions[],
-               struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+                            uint64_t item_flags __rte_unused,
+                            uint8_t target_protocol __rte_unused,
+                            struct rte_flow_error *error)
 {
-       struct rte_flow local_flow = { .layers = 0, };
-       size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
-       int remain = (flow_size > size) ? flow_size - size : 0;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+       const struct rte_flow_item_mpls *mask = item->mask;
        int ret;
 
-       if (!remain)
-               flow = &local_flow;
-       ret = mlx5_flow_attributes(dev, attributes, flow, error);
-       if (ret < 0)
-               return ret;
-       ret = mlx5_flow_items(pattern, flow, remain, error);
-       if (ret < 0)
-               return ret;
-       size += ret;
-       remain = (flow_size > size) ? flow_size - size : 0;
-       ret = mlx5_flow_actions(dev, actions, flow, remain, error);
+       if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "protocol filtering not compatible"
+                                         " with MPLS layer");
+       if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "a tunnel is already"
+                                         " present");
+       if (!mask)
+               mask = &rte_flow_item_mpls_mask;
+       ret = mlx5_flow_item_acceptable
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_mpls_mask,
+                sizeof(struct rte_flow_item_mpls), error);
        if (ret < 0)
                return ret;
-       size += ret;
-       if (size <= flow_size)
-               flow->verbs.attr->priority = flow->attributes.priority;
-       return size;
+       return 0;
+#endif
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                 "MPLS is not supported by Verbs, please"
+                                 " update the Verbs library.");
 }
 
 /**
@@ -1019,96 +1506,55 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
 {
-       int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+       int ret;
 
+       ret = nic_ops.validate(dev, attr, items, actions, error);
        if (ret < 0)
                return ret;
        return 0;
 }
 
 /**
- * Remove the flow.
+ * Get RSS action from the action list.
  *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in, out] flow
- *   Pointer to flow structure.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ *
+ * @return
+ *   Pointer to the RSS action if it exists, NULL otherwise.
  */
-static void
-mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static const struct rte_flow_action_rss *
+mlx5_flow_get_rss_action(const struct rte_flow_action actions[])
 {
-       if (flow->fate & MLX5_FLOW_FATE_DROP) {
-               if (flow->verbs.flow) {
-                       claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
-                       flow->verbs.flow = NULL;
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       return (const struct rte_flow_action_rss *)
+                              actions->conf;
+               default:
+                       break;
                }
        }
-       if (flow->verbs.hrxq) {
-               if (flow->fate & MLX5_FLOW_FATE_DROP)
-                       mlx5_hrxq_drop_release(dev);
-               else if (flow->fate & MLX5_FLOW_FATE_QUEUE)
-                       mlx5_hrxq_release(dev, flow->verbs.hrxq);
-               flow->verbs.hrxq = NULL;
-       }
+       return NULL;
 }
 
-/**
- * Apply the flow.
- *
- * @param[in] dev
- *   Pointer to Ethernet device structure.
- * @param[in, out] flow
- *   Pointer to flow structure.
- * @param[out] error
- *   Pointer to error structure.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
-               struct rte_flow_error *error)
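+/**
+ * Find the RSS expansion graph root to start the pattern expansion from.
+ *
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] rss_level
+ *   RSS level requested by the RSS action, 2 and above selecting inner RSS.
+ *
+ * @return
+ *   Index of the root node in mlx5_support_expansion[].
+ */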
+static unsigned int
+mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
 {
-       if (flow->fate & MLX5_FLOW_FATE_DROP) {
-               flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
-               if (!flow->verbs.hrxq)
-                       return rte_flow_error_set
-                               (error, errno,
-                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                NULL,
-                                "cannot allocate Drop queue");
-       } else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {
-               struct mlx5_hrxq *hrxq;
-
-               hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
-                                    rss_hash_default_key_len, 0,
-                                    &flow->queue, 1, 0, 0);
-               if (!hrxq)
-                       hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
-                                            rss_hash_default_key_len, 0,
-                                            &flow->queue, 1, 0, 0);
-               if (!hrxq)
-                       return rte_flow_error_set(error, rte_errno,
-                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                       NULL,
-                                       "cannot create flow");
-               flow->verbs.hrxq = hrxq;
-       }
-       flow->verbs.flow =
-               mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
-       if (!flow->verbs.flow) {
-               if (flow->fate & MLX5_FLOW_FATE_DROP)
-                       mlx5_hrxq_drop_release(dev);
-               else
-                       mlx5_hrxq_release(dev, flow->verbs.hrxq);
-               flow->verbs.hrxq = NULL;
-               return rte_flow_error_set(error, errno,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL,
-                                         "kernel module refuses to create"
-                                         " flow");
+       const struct rte_flow_item *item;
+       unsigned int has_vlan = 0;
+
+       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+                       has_vlan = 1;
+                       break;
+               }
        }
-       return 0;
+       if (has_vlan)
+               return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+                                      MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+       return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+                              MLX5_EXPANSION_ROOT_OUTER;
 }
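
To illustrate the expansion performed by mlx5_flow_list_create() below: an eth / ipv4 pattern combined with an RSS action over TCP and UDP types is expanded into one device flow per derived pattern (eth/ipv4, eth/ipv4/udp, eth/ipv4/tcp). A minimal, hypothetical application-side rule exercising this path; the queue array, RSS types and level are illustrative:

#include <rte_ethdev.h>
#include <rte_flow.h>

/* RSS over IPv4 TCP/UDP on the outer headers (level 0/1). */
static struct rte_flow *
create_rss_flow(uint16_t port_id, const uint16_t queues[],
		unsigned int queue_num, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.level = 1,
		.types = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
		.queue_num = queue_num,
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}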
 
 /**
@@ -1138,38 +1584,73 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error)
 {
-       struct rte_flow *flow;
-       size_t size;
+       struct rte_flow *flow = NULL;
+       struct mlx5_flow *dev_flow;
+       uint64_t action_flags = 0;
+       uint64_t item_flags = 0;
+       const struct rte_flow_action_rss *rss;
+       union {
+               struct rte_flow_expand_rss buf;
+               uint8_t buffer[2048];
+       } expand_buffer;
+       struct rte_flow_expand_rss *buf = &expand_buffer.buf;
        int ret;
+       uint32_t i;
+       uint32_t flow_size;
 
-       ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+       ret = mlx5_flow_validate(dev, attr, items, actions, error);
        if (ret < 0)
                return NULL;
-       size = ret;
-       flow = rte_zmalloc(__func__, size, 0);
-       if (!flow) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                  NULL,
-                                  "cannot allocate memory");
-               return NULL;
+       flow_size = sizeof(struct rte_flow);
+       rss = mlx5_flow_get_rss_action(actions);
+       if (rss)
+               flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
+                                           sizeof(void *));
+       else
+               flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
+       flow = rte_calloc(__func__, 1, flow_size, 0);
+       if (!flow) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "not enough memory to create flow");
+               return NULL;
+       }
+       /* The Rx queue index array is stored right after the rte_flow. */
+       flow->queue = (void *)(flow + 1);
+       LIST_INIT(&flow->dev_flows);
+       if (rss && rss->types) {
+               unsigned int graph_root;
+
+               graph_root = mlx5_find_graph_root(items, rss->level);
+               ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+                                         items, rss->types,
+                                         mlx5_support_expansion,
+                                         graph_root);
+               assert(ret > 0 &&
+                      (unsigned int)ret < sizeof(expand_buffer.buffer));
+       } else {
+               buf->entries = 1;
+               buf->entry[0].pattern = (void *)(uintptr_t)items;
+       }
+       for (i = 0; i < buf->entries; ++i) {
+               dev_flow = nic_ops.prepare(attr, buf->entry[i].pattern,
+                                          actions, &item_flags,
+                                          &action_flags, error);
+               if (!dev_flow)
+                       goto error;
+               dev_flow->flow = flow;
+               LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+               ret = nic_ops.translate(dev, dev_flow, attr,
+                                       buf->entry[i].pattern,
+                                       actions, error);
+               if (ret < 0)
+                       goto error;
        }
-       flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
-       flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
-       ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
-       if (ret < 0)
-               goto error;
-       assert((size_t)ret == size);
        if (dev->data->dev_started) {
-               ret = mlx5_flow_apply(dev, flow, error);
+               ret = nic_ops.apply(dev, flow, error);
                if (ret < 0)
                        goto error;
        }
        TAILQ_INSERT_TAIL(list, flow, next);
+       mlx5_flow_rxq_flags_set(dev, flow);
        return flow;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_remove(dev, flow);
+       assert(flow);
+       nic_ops.destroy(dev, flow);
        rte_free(flow);
        rte_errno = ret; /* Restore rte_errno. */
        return NULL;
@@ -1207,8 +1688,14 @@ static void
 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
                       struct rte_flow *flow)
 {
-       mlx5_flow_remove(dev, flow);
+       nic_ops.destroy(dev, flow);
        TAILQ_REMOVE(list, flow, next);
+       /*
+        * Update RX queue flags only if port is started, otherwise it is
+        * already clean.
+        */
+       if (dev->data->dev_started)
+               mlx5_flow_rxq_flags_trim(dev, flow);
        rte_free(flow);
 }
 
@@ -1245,7 +1732,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
        struct rte_flow *flow;
 
        TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
-               mlx5_flow_remove(dev, flow);
+               nic_ops.remove(dev, flow);
+       mlx5_flow_rxq_flags_clear(dev);
 }
 
 /**
@@ -1267,9 +1755,10 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
        int ret = 0;
 
        TAILQ_FOREACH(flow, list, next) {
-               ret = mlx5_flow_apply(dev, flow, &error);
+               ret = nic_ops.apply(dev, flow, &error);
                if (ret < 0)
                        goto error;
+               mlx5_flow_rxq_flags_set(dev, flow);
        }
        return 0;
 error:
@@ -1329,7 +1818,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
                .ingress = 1,
-               .priority = priv->config.flow_prio - 1,
+               .priority = MLX5_FLOW_PRIO_RSVD,
        };
        struct rte_flow_item items[] = {
                {
@@ -1340,7 +1829,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
                },
                {
                        .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
-                               RTE_FLOW_ITEM_TYPE_END,
+                                             RTE_FLOW_ITEM_TYPE_END,
                        .spec = vlan_spec,
                        .last = NULL,
                        .mask = vlan_mask,
@@ -1467,6 +1956,96 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Query flow counter.
+ *
+ * @param[in] flow
+ *   Pointer to the flow.
+ * @param[out] data
+ *   Pointer to the query result (struct rte_flow_query_count).
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
+                     void *data __rte_unused,
+                     struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+       if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
+               struct rte_flow_query_count *qc = data;
+               uint64_t counters[2] = {0, 0};
+               struct ibv_query_counter_set_attr query_cs_attr = {
+                       .cs = flow->counter->cs,
+                       .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+               };
+               struct ibv_counter_set_data query_out = {
+                       .out = counters,
+                       .outlen = 2 * sizeof(uint64_t),
+               };
+               int err = mlx5_glue->query_counter_set(&query_cs_attr,
+                                                      &query_out);
+
+               if (err)
+                       return rte_flow_error_set
+                               (error, err,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                NULL,
+                                "cannot read counter");
+               qc->hits_set = 1;
+               qc->bytes_set = 1;
+               qc->hits = counters[0] - flow->counter->hits;
+               qc->bytes = counters[1] - flow->counter->bytes;
+               if (qc->reset) {
+                       flow->counter->hits = counters[0];
+                       flow->counter->bytes = counters[1];
+               }
+               return 0;
+       }
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL,
+                                 "flow does not have counter");
+#endif
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL,
+                                 "counters are not available");
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+               struct rte_flow *flow,
+               const struct rte_flow_action *actions,
+               void *data,
+               struct rte_flow_error *error)
+{
+       int ret = 0;
+
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_VOID:
+                       break;
+               case RTE_FLOW_ACTION_TYPE_COUNT:
+                       ret = mlx5_flow_query_count(flow, data, error);
+                       break;
+               default:
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 actions,
+                                                 "action not supported");
+               }
+               if (ret < 0)
+                       return ret;
+       }
+       return 0;
+}
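
On the application side, the counter attached by RTE_FLOW_ACTION_TYPE_COUNT is read back through rte_flow_query(). A minimal, hypothetical usage sketch, assuming the flow was created with a COUNT action, the installed rdma-core exposes counter sets, and the rte_flow_query() prototype that takes an action pointer (DPDK 18.08 and later):

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

/* Print the hit/byte counters accumulated by a flow rule. */
static int
print_flow_counter(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count count = { .reset = 0 };
	const struct rte_flow_action action[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, action, &count, &error);
	if (ret)
		return ret; /* e.g. ENOTSUP without counter support. */
	if (count.hits_set)
		printf("hits: %" PRIu64 "\n", count.hits);
	if (count.bytes_set)
		printf("bytes: %" PRIu64 "\n", count.bytes);
	return 0;
}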
+
 /**
  * Convert a flow director filter to a generic flow.
  *
@@ -1533,7 +2112,6 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
                        .dst_addr = input->flow.ip4_flow.dst_ip,
                        .time_to_live = input->flow.ip4_flow.ttl,
                        .type_of_service = input->flow.ip4_flow.tos,
-                       .next_proto_id = input->flow.ip4_flow.proto,
                };
                attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
                        .src_addr = mask->ipv4_mask.src_ip,
@@ -1868,3 +2446,15 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
        }
        return 0;
 }
+
+/**
+ * Init the driver ops structure.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+void
+mlx5_flow_init_driver_ops(struct rte_eth_dev *dev __rte_unused)
+{
+       mlx5_flow_verbs_get_driver_ops(&nic_ops);
+}