net: add rte prefix to ether structures
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index c7589ce..559ca92 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1,33 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <sys/queue.h>
 #include <unistd.h>
 #include <stdarg.h>
 
+#include <rte_debug.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_log.h>
-#include <rte_memzone.h>
 #include <rte_malloc.h>
-#include <rte_eth_ctrl.h>
 #include <rte_tailq.h>
 #include <rte_flow_driver.h>
 
@@ -52,8 +23,7 @@
 #include "base/i40e_prototype.h"
 #include "i40e_ethdev.h"
 
-#define I40E_IPV4_TC_SHIFT     4
-#define I40E_IPV6_TC_MASK      (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_TC_MASK      (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
 #define I40E_IPV6_FRAG_HEADER  44
 #define I40E_TENANT_ARRAY_NUM  3
 #define I40E_TCI_MASK          0xFFFF
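
The mask rewrite above fixes two problems at once: IPv6's vtc_flow is a 32-bit word laid out as version (4 bits), traffic class (8 bits) and flow label (20 bits), so the old 16-bit constant and 4-bit shift could never isolate the TC byte. A minimal standalone sketch of the corrected extraction, assuming I40E_FDIR_IPv6_TC_OFFSET is the TC's bit offset of 20 within the host-order word:

    #include <stdint.h>
    #include <stdio.h>

    #define FDIR_IPV6_TC_OFFSET  20   /* assumed value of I40E_FDIR_IPv6_TC_OFFSET */
    #define IPV6_TC_MASK         (0xFF << FDIR_IPV6_TC_OFFSET)

    int main(void)
    {
            /* version 6, traffic class 0xAB, flow label 0xCDEF1 */
            uint32_t vtc_flow_cpu = 0x6ABCDEF1;
            uint8_t tc = (uint8_t)((vtc_flow_cpu & IPV6_TC_MASK) >>
                                   FDIR_IPV6_TC_OFFSET);

            printf("tc = 0x%02X\n", tc);   /* prints 0xAB */
            return 0;
    }
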
@@ -83,13 +53,14 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+                                       const struct rte_flow_attr *attr,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
-                                       struct rte_eth_fdir_filter *filter);
+                                       struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
-                                      struct rte_eth_fdir_filter *filter);
+                                      struct i40e_fdir_filter_conf *filter);
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
@@ -126,6 +97,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+                                     const struct rte_flow_attr *attr,
+                                     const struct rte_flow_item pattern[],
+                                     const struct rte_flow_action actions[],
+                                     struct rte_flow_error *error,
+                                     union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -134,6 +111,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
 static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
@@ -153,8 +132,8 @@ const struct rte_flow_ops i40e_flow_ops = {
        .flush = i40e_flow_flush,
 };
 
-union i40e_filter_t cons_filter;
-enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+static union i40e_filter_t cons_filter;
+static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
 
 /* Pattern matched ethertype filter */
 static enum rte_flow_item_type pattern_ethertype[] = {
@@ -190,6 +169,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPC,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPU,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPU,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPU,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
@@ -217,6 +230,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_END,
 };
 
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPC,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPU,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPU,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_GTPU,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
@@ -1577,10 +1624,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+       { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
        /* FDIR - support default flow type with flexible payload */
        { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -1733,6 +1788,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
        { pattern_mpls_2, i40e_flow_parse_mpls_filter },
        { pattern_mpls_3, i40e_flow_parse_mpls_filter },
        { pattern_mpls_4, i40e_flow_parse_mpls_filter },
+       /* GTP-C & GTP-U */
+       { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+       { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+       { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+       { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
        /* QINQ */
        { pattern_qinq_1, i40e_flow_parse_qinq_filter },
 };
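
To see how these pattern tables are exercised, here is a hedged sketch (not part of the patch) of an application creating a rule that matches the new pattern_fdir_ipv4_gtpu entry. The FDIR parser later in this file accepts only a fully-masked TEID with the other GTP fields left unmasked; the port id and queue index below are illustrative:

    #include <stdint.h>
    #include <rte_flow.h>
    #include <rte_byteorder.h>

    /* Sketch: FDIR rule matching ETH / IPV4 / UDP / GTPU on a TEID. */
    static struct rte_flow *
    create_gtpu_teid_rule(uint16_t port_id, struct rte_flow_error *err)
    {
            static const struct rte_flow_item_gtp gtp_spec = {
                    .teid = RTE_BE32(0x12345678),
            };
            static const struct rte_flow_item_gtp gtp_mask = {
                    .teid = RTE_BE32(UINT32_MAX), /* full TEID mask required */
            };
            static const struct rte_flow_action_queue queue = { .index = 3 };
            const struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP },
                    { .type = RTE_FLOW_ITEM_TYPE_GTPU,
                      .spec = &gtp_spec, .mask = &gtp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            const struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            const struct rte_flow_attr attr = { .ingress = 1 };

            return rte_flow_create(port_id, &attr, pattern, actions, err);
    }
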
@@ -1880,7 +1940,8 @@ static uint16_t
 i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+       int qinq = dev->data->dev_conf.rxmode.offloads &
+               DEV_RX_OFFLOAD_VLAN_EXTEND;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;
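
This hunk tracks the ethdev migration from individual rxmode flags to the DEV_RX_OFFLOAD_* bitmask. On the application side the equivalent change is roughly the following sketch (port_id and the queue counts are assumed to be defined elsewhere):

    struct rte_eth_conf conf = { 0 };

    /* Before the offloads API: conf.rxmode.hw_vlan_extend = 1; */
    conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; /* extended (QinQ) VLAN */

    rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, &conf);
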
@@ -1930,8 +1991,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       eth_spec = (const struct rte_flow_item_eth *)item->spec;
-                       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                       eth_spec = item->spec;
+                       eth_mask = item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
@@ -2016,7 +2077,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
        }
 
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
-               act_q = (const struct rte_flow_action_queue *)act->conf;
+               act_q = act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
@@ -2191,11 +2252,18 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
                            uint8_t raw_id)
 {
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-       uint32_t flx_pit;
+       uint32_t flx_pit, flx_ort;
        uint8_t field_idx;
        uint16_t min_next_off = 0;  /* in words */
        uint8_t i;
 
+       if (raw_id) {
+               flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+                         (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+                         (layer_idx * I40E_MAX_FLXPLD_FIED);
+               I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+       }
+
        /* Set flex pit */
        for (i = 0; i < raw_id; i++) {
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
@@ -2303,20 +2371,59 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
        return 0;
 }
 
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+                               enum rte_flow_item_type item_type,
+                               struct i40e_fdir_filter_conf *filter)
+{
+       struct i40e_customized_pctype *cus_pctype = NULL;
+
+       switch (item_type) {
+       case RTE_FLOW_ITEM_TYPE_GTPC:
+               cus_pctype = i40e_find_customized_pctype(pf,
+                                                        I40E_CUSTOMIZED_GTPC);
+               break;
+       case RTE_FLOW_ITEM_TYPE_GTPU:
+               if (!filter->input.flow_ext.inner_ip)
+                       cus_pctype = i40e_find_customized_pctype(pf,
+                                                        I40E_CUSTOMIZED_GTPU);
+               else if (filter->input.flow_ext.iip_type ==
+                        I40E_FDIR_IPTYPE_IPV4)
+                       cus_pctype = i40e_find_customized_pctype(pf,
+                                                I40E_CUSTOMIZED_GTPU_IPV4);
+               else if (filter->input.flow_ext.iip_type ==
+                        I40E_FDIR_IPTYPE_IPV6)
+                       cus_pctype = i40e_find_customized_pctype(pf,
+                                                I40E_CUSTOMIZED_GTPU_IPV6);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Unsupported item type");
+               break;
+       }
+
+       if (cus_pctype && cus_pctype->valid)
+               return cus_pctype->pctype;
+
+       return I40E_FILTER_PCTYPE_INVALID;
+}
+
 /* 1. The 'last' field in each item must be NULL, as range is not supported.
  * 2. Supported patterns: refer to array i40e_supported_patterns.
- * 3. Supported flow type and input set: refer to array
+ * 3. Default supported flow type and input set: refer to array
  *    valid_fdir_inset_table in i40e_ethdev.c.
  * 4. Mask of fields which need to be matched should be
  *    filled with 1.
  * 5. Mask of fields which need not be matched should be
  *    filled with 0.
+ * 6. GTP profile supports GTPv1 only.
+ * 7. GTP-C response message ('source_port' = 2123) is not supported.
  */
 static int
 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+                            const struct rte_flow_attr *attr,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
-                            struct rte_eth_fdir_filter *filter)
+                            struct i40e_fdir_filter_conf *filter)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
@@ -2327,15 +2434,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+       const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
        const struct rte_flow_item_raw *raw_spec, *raw_mask;
        const struct rte_flow_item_vf *vf_spec;
 
-       uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
-       enum i40e_filter_pctype pctype;
+       uint8_t pctype = 0;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t frag_off;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+       enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
        uint32_t i, j;
        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2352,12 +2460,15 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
        bool cfg_flex_msk = true;
        uint16_t outer_tpid;
        uint16_t ether_type;
+       uint32_t vtc_flow_cpu;
+       bool outer_ip = true;
        int ret;
 
-       memset(off_arr, 0, I40E_MAX_FLXPLD_FIED);
-       memset(len_arr, 0, I40E_MAX_FLXPLD_FIED);
+       memset(off_arr, 0, sizeof(off_arr));
+       memset(len_arr, 0, sizeof(len_arr));
        memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
        outer_tpid = i40e_get_outer_vlan(dev);
+       filter->input.flow_ext.customized_pctype = false;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
@@ -2369,8 +2480,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       eth_spec = (const struct rte_flow_item_eth *)item->spec;
-                       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                       eth_spec = item->spec;
+                       eth_mask = item->mask;
 
                        if (eth_spec && eth_mask) {
                                if (!is_zero_ether_addr(&eth_mask->src) ||
@@ -2381,16 +2492,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                                      "Invalid MAC_addr mask.");
                                        return -rte_errno;
                                }
+                       }
+                       if (eth_spec && eth_mask && eth_mask->type) {
+                               enum rte_flow_item_type next = (item + 1)->type;
 
-                               if ((eth_mask->type & UINT16_MAX) ==
-                                   UINT16_MAX) {
-                                       input_set |= I40E_INSET_LAST_ETHER_TYPE;
-                                       filter->input.flow.l2_flow.ether_type =
-                                               eth_spec->type;
+                               if (eth_mask->type != RTE_BE16(0xffff)) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                     RTE_FLOW_ERROR_TYPE_ITEM,
+                                                     item,
+                                                     "Invalid type mask.");
+                                       return -rte_errno;
                                }
 
                                ether_type = rte_be_to_cpu_16(eth_spec->type);
-                               if (ether_type == ETHER_TYPE_IPv4 ||
+
+                               if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+                                   ether_type == ETHER_TYPE_IPv4 ||
                                    ether_type == ETHER_TYPE_IPv6 ||
                                    ether_type == ETHER_TYPE_ARP ||
                                    ether_type == outer_tpid) {
@@ -2400,17 +2517,20 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                                     "Unsupported ether_type.");
                                        return -rte_errno;
                                }
+                               input_set |= I40E_INSET_LAST_ETHER_TYPE;
+                               filter->input.flow.l2_flow.ether_type =
+                                       eth_spec->type;
                        }
 
-                       flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+                       pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
                        layer_idx = I40E_FLXPLD_L2_IDX;
 
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
-                       vlan_spec =
-                               (const struct rte_flow_item_vlan *)item->spec;
-                       vlan_mask =
-                               (const struct rte_flow_item_vlan *)item->mask;
+                       vlan_spec = item->spec;
+                       vlan_mask = item->mask;
+
+                       RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
                        if (vlan_spec && vlan_mask) {
                                if (vlan_mask->tci ==
                                    rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2419,19 +2539,46 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                                vlan_spec->tci;
                                }
                        }
+                       if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+                               if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                     RTE_FLOW_ERROR_TYPE_ITEM,
+                                                     item,
+                                                     "Invalid inner_type"
+                                                     " mask.");
+                                       return -rte_errno;
+                               }
+
+                               ether_type =
+                                       rte_be_to_cpu_16(vlan_spec->inner_type);
+
+                               if (ether_type == ETHER_TYPE_IPv4 ||
+                                   ether_type == ETHER_TYPE_IPv6 ||
+                                   ether_type == ETHER_TYPE_ARP ||
+                                   ether_type == outer_tpid) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                    RTE_FLOW_ERROR_TYPE_ITEM,
+                                                    item,
+                                                    "Unsupported inner_type.");
+                                       return -rte_errno;
+                               }
+                               input_set |= I40E_INSET_LAST_ETHER_TYPE;
+                               filter->input.flow.l2_flow.ether_type =
+                                       vlan_spec->inner_type;
+                       }
 
-                       flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+                       pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
                        layer_idx = I40E_FLXPLD_L2_IDX;
 
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
-                       ipv4_spec =
-                               (const struct rte_flow_item_ipv4 *)item->spec;
-                       ipv4_mask =
-                               (const struct rte_flow_item_ipv4 *)item->mask;
+                       ipv4_spec = item->spec;
+                       ipv4_mask = item->mask;
+                       pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+                       layer_idx = I40E_FLXPLD_L3_IDX;
 
-                       if (ipv4_spec && ipv4_mask) {
+                       if (ipv4_spec && ipv4_mask && outer_ip) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                    ipv4_mask->hdr.total_length ||
@@ -2456,14 +2603,12 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= I40E_INSET_IPV4_PROTO;
 
-                               /* Get filter info */
-                               flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                                /* Check if it is a fragment. */
                                frag_off = ipv4_spec->hdr.fragment_offset;
                                frag_off = rte_be_to_cpu_16(frag_off);
                                if (frag_off & IPV4_HDR_OFFSET_MASK ||
                                    frag_off & IPV4_HDR_MF_FLAG)
-                                       flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+                                       pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
 
                                /* Get the filter info */
                                filter->input.flow.ip4_flow.proto =
@@ -2476,19 +2621,30 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        ipv4_spec->hdr.src_addr;
                                filter->input.flow.ip4_flow.dst_ip =
                                        ipv4_spec->hdr.dst_addr;
+                       } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+                               filter->input.flow_ext.inner_ip = true;
+                               filter->input.flow_ext.iip_type =
+                                       I40E_FDIR_IPTYPE_IPV4;
+                       } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid inner IPv4 mask.");
+                               return -rte_errno;
                        }
 
-                       layer_idx = I40E_FLXPLD_L3_IDX;
+                       if (outer_ip)
+                               outer_ip = false;
 
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
-                       ipv6_spec =
-                               (const struct rte_flow_item_ipv6 *)item->spec;
-                       ipv6_mask =
-                               (const struct rte_flow_item_ipv6 *)item->mask;
+                       ipv6_spec = item->spec;
+                       ipv6_mask = item->mask;
+                       pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+                       layer_idx = I40E_FLXPLD_L3_IDX;
 
-                       if (ipv6_spec && ipv6_mask) {
+                       if (ipv6_spec && ipv6_mask && outer_ip) {
                                /* Check IPv6 mask and update input set */
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
@@ -2508,8 +2664,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        input_set |= I40E_INSET_IPV6_DST;
 
                                if ((ipv6_mask->hdr.vtc_flow &
-                                    rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
-                                   == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+                                    rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+                                   == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                                        input_set |= I40E_INSET_IPV6_TC;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= I40E_INSET_IPV6_NEXT_HDR;
@@ -2517,9 +2673,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        input_set |= I40E_INSET_IPV6_HOP_LIMIT;
 
                                /* Get filter info */
+                               vtc_flow_cpu =
+                                     rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                                filter->input.flow.ipv6_flow.tc =
-                                       (uint8_t)(ipv6_spec->hdr.vtc_flow <<
-                                                 I40E_IPV4_TC_SHIFT);
+                                       (uint8_t)(vtc_flow_cpu >>
+                                                 I40E_FDIR_IPv6_TC_OFFSET);
                                filter->input.flow.ipv6_flow.proto =
                                        ipv6_spec->hdr.proto;
                                filter->input.flow.ipv6_flow.hop_limits =
@@ -2533,20 +2691,32 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                /* Check if it is a fragment. */
                                if (ipv6_spec->hdr.proto ==
                                    I40E_IPV6_FRAG_HEADER)
-                                       flow_type =
-                                               RTE_ETH_FLOW_FRAG_IPV6;
-                               else
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+                                       pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
+                       } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+                               filter->input.flow_ext.inner_ip = true;
+                               filter->input.flow_ext.iip_type =
+                                       I40E_FDIR_IPTYPE_IPV6;
+                       } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid inner IPv6 mask");
+                               return -rte_errno;
                        }
 
-                       layer_idx = I40E_FLXPLD_L3_IDX;
-
+                       if (outer_ip)
+                               outer_ip = false;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
-                       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-                       tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
-
+                       tcp_spec = item->spec;
+                       tcp_mask = item->mask;
+
+                       if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                               pctype =
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+                       else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                               pctype =
+                                       I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
@@ -2569,13 +2739,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        input_set |= I40E_INSET_DST_PORT;
 
                                /* Get filter info */
-                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
-                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
-
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.flow.tcp4_flow.src_port =
                                                tcp_spec->hdr.src_port;
@@ -2593,8 +2756,15 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
-                       udp_spec = (const struct rte_flow_item_udp *)item->spec;
-                       udp_mask = (const struct rte_flow_item_udp *)item->mask;
+                       udp_spec = item->spec;
+                       udp_mask = item->mask;
+
+                       if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                               pctype =
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+                       else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                               pctype =
+                                       I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
 
                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set*/
@@ -2613,13 +2783,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        input_set |= I40E_INSET_DST_PORT;
 
                                /* Get filter info */
-                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
-                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
-
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.flow.udp4_flow.src_port =
                                                udp_spec->hdr.src_port;
@@ -2635,12 +2798,48 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
                        layer_idx = I40E_FLXPLD_L4_IDX;
 
+                       break;
+               case RTE_FLOW_ITEM_TYPE_GTPC:
+               case RTE_FLOW_ITEM_TYPE_GTPU:
+                       if (!pf->gtp_support) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Unsupported protocol");
+                               return -rte_errno;
+                       }
+
+                       gtp_spec = item->spec;
+                       gtp_mask = item->mask;
+
+                       if (gtp_spec && gtp_mask) {
+                               if (gtp_mask->v_pt_rsv_flags ||
+                                   gtp_mask->msg_type ||
+                                   gtp_mask->msg_len ||
+                                   gtp_mask->teid != UINT32_MAX) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid GTP mask");
+                                       return -rte_errno;
+                               }
+
+                               filter->input.flow.gtp_flow.teid =
+                                       gtp_spec->teid;
+                               filter->input.flow_ext.customized_pctype = true;
+                               cus_proto = item_type;
+                       }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
-                       sctp_spec =
-                               (const struct rte_flow_item_sctp *)item->spec;
-                       sctp_mask =
-                               (const struct rte_flow_item_sctp *)item->mask;
+                       sctp_spec = item->spec;
+                       sctp_mask = item->mask;
+
+                       if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                               pctype =
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+                       else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                               pctype =
+                                       I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
 
                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
@@ -2660,13 +2859,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        input_set |= I40E_INSET_SCTP_VT;
 
                                /* Get filter info */
-                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
-                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-                                       flow_type =
-                                               RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
-
                                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                        filter->input.flow.sctp4_flow.src_port =
                                                sctp_spec->hdr.src_port;
@@ -2688,8 +2880,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 
                        break;
                case RTE_FLOW_ITEM_TYPE_RAW:
-                       raw_spec = (const struct rte_flow_item_raw *)item->spec;
-                       raw_mask = (const struct rte_flow_item_raw *)item->mask;
+                       raw_spec = item->spec;
+                       raw_mask = item->mask;
 
                        if (!raw_spec || !raw_mask) {
                                rte_flow_error_set(error, EINVAL,
@@ -2699,6 +2891,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                return -rte_errno;
                        }
 
+                       if (pf->support_multi_driver) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Unsupported flexible payload.");
+                               return -rte_errno;
+                       }
+
                        ret = i40e_flow_check_raw_item(item, raw_spec, error);
                        if (ret < 0)
                                return ret;
@@ -2757,7 +2957,17 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                        raw_id++;
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
-                       vf_spec = (const struct rte_flow_item_vf *)item->spec;
+                       vf_spec = item->spec;
+                       if (!attr->transfer) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Matching VF traffic"
+                                                  " without affecting it"
+                                                  " (transfer attribute)"
+                                                  " is unsupported");
+                               return -rte_errno;
+                       }
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
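
The check added above ties VF matching to the newer switch-domain semantics: a rule that matches another port's traffic must carry the transfer attribute. A sketch of the attribute and item an application would now pass (the VF id is illustrative):

    const struct rte_flow_attr attr = { .ingress = 1, .transfer = 1 };
    static const struct rte_flow_item_vf vf_spec = { .id = 2 };
    const struct rte_flow_item vf_item = {
            .type = RTE_FLOW_ITEM_TYPE_VF,
            .spec = &vf_spec,
    };
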
@@ -2774,51 +2984,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                }
        }
 
-       pctype = i40e_flowtype_to_pctype(flow_type);
-       if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
-               rte_flow_error_set(error, EINVAL,
-                                  RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                  "Unsupported flow type");
-               return -rte_errno;
+       /* Get customized pctype value */
+       if (filter->input.flow_ext.customized_pctype) {
+               pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+               if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Unsupported pctype");
+                       return -rte_errno;
+               }
        }
 
-       ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
-       if (ret == -1) {
-               rte_flow_error_set(error, EINVAL,
-                                  RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                  "Conflict with the first rule's input set.");
-               return -rte_errno;
-       } else if (ret == -EINVAL) {
-               rte_flow_error_set(error, EINVAL,
-                                  RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                  "Invalid pattern mask.");
-               return -rte_errno;
-       }
+       /* If the customized pctype is not used, set the fdir configuration. */
+       if (!filter->input.flow_ext.customized_pctype) {
+               ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+               if (ret == -1) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "Conflict with the first rule's input set.");
+                       return -rte_errno;
+               } else if (ret == -EINVAL) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "Invalid pattern mask.");
+                       return -rte_errno;
+               }
 
-       filter->input.flow_type = flow_type;
+               /* Store flex mask to SW */
+               ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+               if (ret == -1) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Exceed maximal number of bitmasks");
+                       return -rte_errno;
+               } else if (ret == -2) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Conflict with the first flexible rule");
+                       return -rte_errno;
+               } else if (ret > 0)
+                       cfg_flex_msk = false;
 
-       /* Store flex mask to SW */
-       ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
-       if (ret == -1) {
-               rte_flow_error_set(error, EINVAL,
-                                  RTE_FLOW_ERROR_TYPE_ITEM,
-                                  item,
-                                  "Exceed maximal number of bitmasks");
-               return -rte_errno;
-       } else if (ret == -2) {
-               rte_flow_error_set(error, EINVAL,
-                                  RTE_FLOW_ERROR_TYPE_ITEM,
-                                  item,
-                                  "Conflict with the first flexible rule");
-               return -rte_errno;
-       } else if (ret > 0)
-               cfg_flex_msk = false;
+               if (cfg_flex_pit)
+                       i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
 
-       if (cfg_flex_pit)
-               i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+               if (cfg_flex_msk)
+                       i40e_flow_set_fdir_flex_msk(pf, pctype);
+       }
 
-       if (cfg_flex_msk)
-               i40e_flow_set_fdir_flex_msk(pf, pctype);
+       filter->input.pctype = pctype;
 
        return 0;
 }
@@ -2830,7 +3047,7 @@ static int
 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
-                           struct rte_eth_fdir_filter *filter)
+                           struct i40e_fdir_filter_conf *filter)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
@@ -2842,21 +3059,24 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
-               act_q = (const struct rte_flow_action_queue *)act->conf;
+               act_q = act->conf;
                filter->action.rx_queue = act_q->index;
-               if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+               if ((!filter->input.flow_ext.is_vf &&
+                    filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
+                   (filter->input.flow_ext.is_vf &&
+                    filter->action.rx_queue >= pf->vf_nb_qps)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
-               filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+               filter->action.behavior = I40E_FDIR_ACCEPT;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
-               filter->action.behavior = RTE_ETH_FDIR_REJECT;
+               filter->action.behavior = I40E_FDIR_REJECT;
                break;
        case RTE_FLOW_ACTION_TYPE_PASSTHRU:
-               filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+               filter->action.behavior = I40E_FDIR_PASSTHRU;
                break;
        default:
                rte_flow_error_set(error, EINVAL,
@@ -2870,12 +3090,12 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_MARK:
-               mark_spec = (const struct rte_flow_action_mark *)act->conf;
-               filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+               mark_spec = act->conf;
+               filter->action.report_status = I40E_FDIR_REPORT_ID;
                filter->soft_id = mark_spec->id;
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
-               filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+               filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
                break;
        case RTE_FLOW_ACTION_TYPE_END:
                return 0;
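
For reference, the action lists this parser accepts are a fate action (QUEUE, DROP or PASSTHRU) optionally followed by MARK or FLAG; a MARK id becomes the filter's soft_id and selects I40E_FDIR_REPORT_ID reporting. A conforming list, with illustrative values:

    static const struct rte_flow_action_queue queue = { .index = 1 };
    static const struct rte_flow_action_mark mark = { .id = 0xbeef };
    const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
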
@@ -2906,11 +3126,13 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
 {
-       struct rte_eth_fdir_filter *fdir_filter =
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_fdir_filter_conf *fdir_filter =
                &filter->fdir_filter;
        int ret;
 
-       ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+       ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+                                          fdir_filter);
        if (ret)
                return ret;
 
@@ -2926,14 +3148,29 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 
        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
-               rte_flow_error_set(error, ENOTSUP,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                  NULL,
-                                  "Check the mode in fdir_conf.");
-               return -rte_errno;
+               /* Enable fdir when the first fdir flow is added. */
+               ret = i40e_fdir_setup(pf);
+               if (ret != I40E_SUCCESS) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE,
+                                          NULL, "Failed to setup fdir.");
+                       return -rte_errno;
+               }
+               ret = i40e_fdir_configure(dev);
+               if (ret < 0) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE,
+                                          NULL, "Failed to configure fdir.");
+                       goto err;
+               }
+
+               dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
        }
 
        return 0;
+err:
+       i40e_fdir_teardown(pf);
+       return -rte_errno;
 }
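
The rework above makes FDIR initialization lazy: the first FDIR flow triggers i40e_fdir_setup()/i40e_fdir_configure() and the driver switches fdir_conf.mode to perfect mode itself. Under that assumption an application no longer needs the explicit preconfiguration, as in this sketch:

    struct rte_eth_conf conf = { 0 };

    /* No longer required before creating rte_flow FDIR rules on i40e:
     * conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
     */
    rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, &conf);
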
 
 /* Parse to get the action info of a tunnel filter
@@ -2961,7 +3198,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
        }
 
        if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
-               act_vf = (const struct rte_flow_action_vf *)act->conf;
+               act_vf = act->conf;
                filter->vf_id = act_vf->id;
                filter->is_to_vf = 1;
                if (filter->vf_id >= pf->vf_num) {
@@ -2976,7 +3213,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
-               act_q = (const struct rte_flow_action_queue *)act->conf;
+               act_q = act->conf;
                filter->queue_id = act_q->index;
                if ((!filter->is_to_vf) &&
                    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
@@ -3068,8 +3305,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       eth_spec = (const struct rte_flow_item_eth *)item->spec;
-                       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                       eth_spec = item->spec;
+                       eth_mask = item->mask;
 
                        /* Check if the ETH item is used as a placeholder.
                         * If so, both spec and mask should be NULL.
@@ -3112,11 +3349,10 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
-                       vlan_spec =
-                               (const struct rte_flow_item_vlan *)item->spec;
-                       vlan_mask =
-                               (const struct rte_flow_item_vlan *)item->mask;
-                       if (!(vlan_spec && vlan_mask)) {
+                       vlan_spec = item->spec;
+                       vlan_mask = item->mask;
+                       if (!(vlan_spec && vlan_mask) ||
+                           vlan_mask->inner_type) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -3172,10 +3408,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
-                       vxlan_spec =
-                               (const struct rte_flow_item_vxlan *)item->spec;
-                       vxlan_mask =
-                               (const struct rte_flow_item_vxlan *)item->mask;
+                       vxlan_spec = item->spec;
+                       vxlan_mask = item->mask;
                        /* Check if the VXLAN item is used to describe the protocol.
                         * If so, both spec and mask should be NULL.
                         * If not, neither spec nor mask should be NULL.
@@ -3301,8 +3535,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       eth_spec = (const struct rte_flow_item_eth *)item->spec;
-                       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                       eth_spec = item->spec;
+                       eth_mask = item->mask;
 
                        /* Check if the ETH item is used as a placeholder.
                         * If so, both spec and mask should be NULL.
@@ -3346,11 +3580,10 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
-                       vlan_spec =
-                               (const struct rte_flow_item_vlan *)item->spec;
-                       vlan_mask =
-                               (const struct rte_flow_item_vlan *)item->mask;
-                       if (!(vlan_spec && vlan_mask)) {
+                       vlan_spec = item->spec;
+                       vlan_mask = item->mask;
+                       if (!(vlan_spec && vlan_mask) ||
+                           vlan_mask->inner_type) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
@@ -3394,10 +3627,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
-                       nvgre_spec =
-                               (const struct rte_flow_item_nvgre *)item->spec;
-                       nvgre_mask =
-                               (const struct rte_flow_item_nvgre *)item->mask;
+                       nvgre_spec = item->spec;
+                       nvgre_mask = item->mask;
                        /* Check if the NVGRE item is used to describe the protocol.
                         * If so, both spec and mask should be NULL.
                         * If not, neither spec nor mask should be NULL.
@@ -3422,6 +3653,41 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
+                               if (nvgre_mask->protocol &&
+                                       nvgre_mask->protocol != 0xFFFF) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item,
+                                               "Invalid NVGRE item");
+                                       return -rte_errno;
+                               }
+                               if (nvgre_mask->c_k_s_rsvd0_ver &&
+                                       nvgre_mask->c_k_s_rsvd0_ver !=
+                                       rte_cpu_to_be_16(0xFFFF)) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid NVGRE item");
+                                       return -rte_errno;
+                               }
+                               if (nvgre_spec->c_k_s_rsvd0_ver !=
+                                       rte_cpu_to_be_16(0x2000) &&
+                                       nvgre_mask->c_k_s_rsvd0_ver) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid NVGRE item");
+                                       return -rte_errno;
+                               }
+                               if (nvgre_mask->protocol &&
+                                       nvgre_spec->protocol !=
+                                       rte_cpu_to_be_16(0x6558)) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid NVGRE item");
+                                       return -rte_errno;
+                               }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->tenant_id =
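
The validation added above pins NVGRE flows to key-present headers (c_k_s_rsvd0_ver equal to 0x2000) carrying Transparent Ethernet Bridging (protocol 0x6558). A sketch of spec/mask values that pass these checks:

    #include <rte_flow.h>
    #include <rte_byteorder.h>

    /* Sketch: NVGRE spec/mask accepted by the checks above. */
    static const struct rte_flow_item_nvgre nvgre_spec = {
            .c_k_s_rsvd0_ver = RTE_BE16(0x2000), /* key bit set, version 0 */
            .protocol        = RTE_BE16(0x6558), /* Transparent Eth Bridging */
            .tni             = { 0x00, 0x12, 0x34 },
    };
    static const struct rte_flow_item_nvgre nvgre_mask = {
            .c_k_s_rsvd0_ver = RTE_BE16(0xffff),
            .protocol        = RTE_BE16(0xffff),
            .tni             = { 0xff, 0xff, 0xff },
    };
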
@@ -3573,10 +3839,8 @@ i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
-                       mpls_spec =
-                               (const struct rte_flow_item_mpls *)item->spec;
-                       mpls_mask =
-                               (const struct rte_flow_item_mpls *)item->mask;
+                       mpls_spec = item->spec;
+                       mpls_mask = item->mask;
 
                        if (!mpls_spec || !mpls_mask) {
                                rte_flow_error_set(error, EINVAL,
@@ -3640,6 +3904,146 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
        return ret;
 }
 
+/* 1. The 'last' field in each item must be NULL, as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ * 5. GTP profile supports GTPv1 only.
+ * 6. GTP-C response message ('source_port' = 2123) is not supported.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+                           const struct rte_flow_item *pattern,
+                           struct rte_flow_error *error,
+                           struct i40e_tunnel_filter_conf *filter)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       const struct rte_flow_item *item = pattern;
+       const struct rte_flow_item_gtp *gtp_spec;
+       const struct rte_flow_item_gtp *gtp_mask;
+       enum rte_flow_item_type item_type;
+
+       if (!pf->gtp_support) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  item,
+                                  "GTP is not supported by default.");
+               return -rte_errno;
+       }
+
+       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Not support range");
+                       return -rte_errno;
+               }
+               item_type = item->type;
+               switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_ETH:
+                       if (item->spec || item->mask) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid ETH item");
+                               return -rte_errno;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+                       /* IPv4 is used to describe protocol,
+                        * spec and mask should be NULL.
+                        */
+                       if (item->spec || item->mask) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid IPv4 item");
+                               return -rte_errno;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       if (item->spec || item->mask) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid UDP item");
+                               return -rte_errno;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_GTPC:
+               case RTE_FLOW_ITEM_TYPE_GTPU:
+                       gtp_spec = item->spec;
+                       gtp_mask = item->mask;
+
+                       if (!gtp_spec || !gtp_mask) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid GTP item");
+                               return -rte_errno;
+                       }
+
+                       if (gtp_mask->v_pt_rsv_flags ||
+                           gtp_mask->msg_type ||
+                           gtp_mask->msg_len ||
+                           gtp_mask->teid != UINT32_MAX) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid GTP mask");
+                               return -rte_errno;
+                       }
+
+                       if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+                               filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+                       else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+                               filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+                       filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+
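Per the numbered rules above, ETH, IPV4 and UDP must carry no spec or mask, and the GTP item must mask the TEID fully while leaving v_pt_rsv_flags, msg_type and msg_len unmasked. A minimal sketch of a conforming pattern; the names and the TEID value are illustrative:

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	static const struct rte_flow_item_gtp gtp_spec_ex = {
		.teid = RTE_BE32(0x12345678),  /* example tunnel endpoint ID */
	};
	static const struct rte_flow_item_gtp gtp_mask_ex = {
		.teid = RTE_BE32(0xffffffff),  /* TEID must be fully masked */
	};
	static const struct rte_flow_item gtp_pattern_ex[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },  /* no spec/mask allowed */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* protocol selector only */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec_ex, .mask = &gtp_mask_ex },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};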
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+                          const struct rte_flow_attr *attr,
+                          const struct rte_flow_item pattern[],
+                          const struct rte_flow_action actions[],
+                          struct rte_flow_error *error,
+                          union i40e_filter_t *filter)
+{
+       struct i40e_tunnel_filter_conf *tunnel_filter =
+               &filter->consistent_tunnel_filter;
+       int ret;
+
+       ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+                                         error, tunnel_filter);
+       if (ret)
+               return ret;
+
+       ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+       if (ret)
+               return ret;
+
+       ret = i40e_flow_parse_attr(attr, error);
+       if (ret)
+               return ret;
+
+       cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+       return ret;
+}
+
 /* 1. Last in item should be NULL as range is not supported.
  * 2. Supported filter types: QINQ.
  * 3. Mask of fields which need to be matched should be
@@ -3684,12 +4088,11 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
-                       vlan_spec =
-                               (const struct rte_flow_item_vlan *)item->spec;
-                       vlan_mask =
-                               (const struct rte_flow_item_vlan *)item->mask;
+                       vlan_spec = item->spec;
+                       vlan_mask = item->mask;
 
-                       if (!(vlan_spec && vlan_mask)) {
+                       if (!(vlan_spec && vlan_mask) ||
+                           vlan_mask->inner_type) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
@@ -3714,8 +4117,10 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
        }
 
        /* Get filter specification */
-       if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
-           (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+       if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
+                       rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+                       (i_vlan_mask != NULL) &&
+                       (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
                filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
                        & I40E_TCI_MASK;
                filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
@@ -3762,6 +4167,392 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
        return ret;
 }
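For reference, a QinQ pattern that reaches the branch above must supply spec and mask for both VLAN items, mask each TCI fully (I40E_TCI_MASK) and leave inner_type unmasked. A minimal sketch, with hypothetical names and illustrative VLAN IDs:

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	static const struct rte_flow_item_vlan outer_vlan_ex = {
		.tci = RTE_BE16(100),    /* example outer VLAN ID, PCP 0 */
	};
	static const struct rte_flow_item_vlan inner_vlan_ex = {
		.tci = RTE_BE16(200),    /* example inner VLAN ID, PCP 0 */
	};
	static const struct rte_flow_item_vlan vlan_mask_ex = {
		.tci = RTE_BE16(0xffff), /* full TCI; inner_type left 0 */
	};
	static const struct rte_flow_item qinq_pattern_ex[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &outer_vlan_ex, .mask = &vlan_mask_ex },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &inner_vlan_ex, .mask = &vlan_mask_ex },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};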
 
+/**
+ * This function configures the existing RSS setup of i40e with rte_flow.
+ * It also enables queue region configuration through the flow API.
+ * The pattern indicates which parameters are carried by the flow, such
+ * as the user_priority or flowtype for a queue region, or the hash
+ * function for RSS. The action carries parameters such as the queue
+ * index and hash function for RSS, or the flowtype for queue region
+ * configuration.
+ * For example:
+ * pattern:
+ * Case 1: only ETH, the flowtype for the queue region will be parsed.
+ * Case 2: only VLAN, the user_priority for the queue region will be parsed.
+ * Case 3: none, the RSS-related parameters will be parsed in the action.
+ * Any pattern item other than ETH or VLAN is treated as invalid,
+ * except END.
+ * So the choice of pattern depends on the purpose of the configuration
+ * of that flow.
+ * action:
+ * The RSS action carries the valid parameters in a
+ * struct rte_flow_action_rss for all three cases.
+ */
+static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+                            const struct rte_flow_item *pattern,
+                            struct rte_flow_error *error,
+                            uint8_t *action_flag,
+                            struct i40e_queue_regions *info)
+{
+       const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+       const struct rte_flow_item *item = pattern;
+       enum rte_flow_item_type item_type;
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_END)
+               return 0;
+
+       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Not support range");
+                       return -rte_errno;
+               }
+               item_type = item->type;
+               switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_ETH:
+                       *action_flag = 1;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VLAN:
+                       vlan_spec = item->spec;
+                       vlan_mask = item->mask;
+                       if (vlan_spec && vlan_mask) {
+                               if (vlan_mask->tci ==
+                                       rte_cpu_to_be_16(I40E_TCI_MASK)) {
+                                       info->region[0].user_priority[0] =
+                                               (rte_be_to_cpu_16(
+                                               vlan_spec->tci) >> 13) & 0x7;
+                                       info->region[0].user_priority_num = 1;
+                                       info->queue_region_number = 1;
+                                       *action_flag = 0;
+                               }
+                       }
+                       break;
+               default:
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item,
+                                       "Not supported pattern item");
+                       return -rte_errno;
+               }
+       }
+
+       return 0;
+}
+
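The shift in the VLAN case above isolates the 3-bit PCP field from the 16-bit TCI, whose layout is PCP(3) | DEI(1) | VID(12). A one-line helper (hypothetical name) makes the arithmetic concrete:

	#include <stdint.h>

	/* Extract the 802.1p user priority (PCP) from a host-order TCI.
	 * E.g. tci = 0xa064 -> PCP 5, VID 0x064. */
	static inline uint8_t
	tci_user_priority(uint16_t tci)
	{
		return (tci >> 13) & 0x7;
	}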
+/**
+ * This function parses the RSS queue index, the total queue number and
+ * the hash functions. If the purpose of this configuration is queue
+ * region configuration, it sets the queue_region_conf flag to TRUE,
+ * otherwise to FALSE. For queue region configuration it also needs to
+ * parse the hardware flowtype and user_priority from the configuration,
+ * and it checks the validity of these parameters. For example, the
+ * queue region sizes should be any of the following values: 1, 2, 4,
+ * 8, 16, 32, 64; the hw_flowtype or PCTYPE max index should be 63; the
+ * user priority max index should be 7, and so on. Also, the queue
+ * indices should form a continuous sequence, and the queue region
+ * indices should be part of the RSS queue indices for this port.
+ */
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+                           const struct rte_flow_action *actions,
+                           struct rte_flow_error *error,
+                           uint8_t action_flag,
+                           struct i40e_queue_regions *conf_info,
+                           union i40e_filter_t *filter)
+{
+       const struct rte_flow_action *act;
+       const struct rte_flow_action_rss *rss;
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_queue_regions *info = &pf->queue_region;
+       struct i40e_rte_flow_rss_conf *rss_config =
+                       &filter->rss_conf;
+       struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+       uint16_t i, j, n, tmp;
+       uint32_t index = 0;
+       uint64_t hf_bit = 1;
+
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       rss = act->conf;
+
+       /**
+        * RSS only supports forwarding;
+        * check that the first non-void action is RSS.
+        */
+       if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+               memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       /* The RSS action must carry a configuration, as it is
+        * dereferenced below before the later !rss check.
+        */
+       if (!rss) {
+               memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "NULL RSS action configuration");
+               return -rte_errno;
+       }
+
+       if (action_flag) {
+               for (n = 0; n < 64; n++) {
+                       if (rss->types & (hf_bit << n)) {
+                               conf_info->region[0].hw_flowtype[0] = n;
+                               conf_info->region[0].flowtype_num = 1;
+                               conf_info->queue_region_number = 1;
+                               break;
+                       }
+               }
+       }
+
+       /**
+        * Check the queue-region-related parameters to ensure the queue
+        * indices for the queue region form a continuous sequence and
+        * are part of the RSS queue indices for this port.
+        */
+       if (conf_info->queue_region_number) {
+               for (i = 0; i < rss->queue_num; i++) {
+                       for (j = 0; j < rss_info->conf.queue_num; j++) {
+                               if (rss->queue[i] == rss_info->conf.queue[j])
+                                       break;
+                       }
+                       if (j == rss_info->conf.queue_num) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       act,
+                                       "no valid queues");
+                               return -rte_errno;
+                       }
+               }
+
+               for (i = 0; i + 1 < rss->queue_num; i++) {
+                       if (rss->queue[i + 1] != rss->queue[i] + 1) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       act,
+                                       "no valid queues");
+                               return -rte_errno;
+                       }
+               }
+       }
+
+       /* Parse queue region related parameters from configuration */
+       for (n = 0; n < conf_info->queue_region_number; n++) {
+               if (conf_info->region[n].user_priority_num ||
+                               conf_info->region[n].flowtype_num) {
+                       if (!((rte_is_power_of_2(rss->queue_num)) &&
+                                       rss->queue_num <= 64)) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       act,
+                                       "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+                                       "total number of queues does not exceed the VSI allocation");
+                               return -rte_errno;
+                       }
+
+                       if (conf_info->region[n].user_priority[n] >=
+                                       I40E_MAX_USER_PRIORITY) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       act,
+                                       "the user priority max index is 7");
+                               return -rte_errno;
+                       }
+
+                       if (conf_info->region[n].hw_flowtype[n] >=
+                                       I40E_FILTER_PCTYPE_MAX) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       act,
+                                       "the hw_flowtype or PCTYPE max index is 63");
+                               return -rte_errno;
+                       }
+
+                       for (i = 0; i < info->queue_region_number; i++) {
+                               if (info->region[i].queue_num ==
+                                   rss->queue_num &&
+                                       info->region[i].queue_start_index ==
+                                               rss->queue[0])
+                                       break;
+                       }
+
+                       if (i == info->queue_region_number) {
+                               if (i > I40E_REGION_MAX_INDEX) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ACTION,
+                                               act,
+                                               "the queue region max index is 7");
+                                       return -rte_errno;
+                               }
+
+                               info->region[i].queue_num =
+                                       rss->queue_num;
+                               info->region[i].queue_start_index =
+                                       rss->queue[0];
+                               info->region[i].region_id =
+                                       info->queue_region_number;
+
+                               j = info->region[i].user_priority_num;
+                               tmp = conf_info->region[n].user_priority[0];
+                               if (conf_info->region[n].user_priority_num) {
+                                       info->region[i].user_priority[j] = tmp;
+                                       info->region[i].user_priority_num++;
+                               }
+
+                               j = info->region[i].flowtype_num;
+                               tmp = conf_info->region[n].hw_flowtype[0];
+                               if (conf_info->region[n].flowtype_num) {
+                                       info->region[i].hw_flowtype[j] = tmp;
+                                       info->region[i].flowtype_num++;
+                               }
+                               info->queue_region_number++;
+                       } else {
+                               j = info->region[i].user_priority_num;
+                               tmp = conf_info->region[n].user_priority[0];
+                               if (conf_info->region[n].user_priority_num) {
+                                       info->region[i].user_priority[j] = tmp;
+                                       info->region[i].user_priority_num++;
+                               }
+
+                               j = info->region[i].flowtype_num;
+                               tmp = conf_info->region[n].hw_flowtype[0];
+                               if (conf_info->region[n].flowtype_num) {
+                                       info->region[i].hw_flowtype[j] = tmp;
+                                       info->region[i].flowtype_num++;
+                               }
+                       }
+               }
+
+               rss_config->queue_region_conf = TRUE;
+       }
+
+       /**
+        * Return early if this flow is used for queue region configuration.
+        */
+       if (rss_config->queue_region_conf)
+               return 0;
+
+       if (!rss || !rss->queue_num) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               act,
+                               "no valid queues");
+               return -rte_errno;
+       }
+
+       for (n = 0; n < rss->queue_num; n++) {
+               if (rss->queue[n] >= dev->data->nb_rx_queues) {
+                       rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION,
+                                  act,
+                                  "queue id > max number of queues");
+                       return -rte_errno;
+               }
+       }
+
+       if (rss_info->conf.queue_num) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               act,
+                               "rss only allows one valid rule");
+               return -rte_errno;
+       }
+
+       /* Parse RSS related parameters from configuration */
+       if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "non-default RSS hash functions are not supported");
+       if (rss->level)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "a nonzero RSS encapsulation level is not supported");
+       if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "RSS hash key too large");
+       if (rss->queue_num > RTE_DIM(rss_config->queue))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "too many queues for RSS context");
+       if (i40e_rss_conf_init(rss_config, rss))
+               return rte_flow_error_set
+                       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "RSS context initialization failure");
+
+       index++;
+
+       /* check if the next not void action is END */
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+       rss_config->queue_region_conf = FALSE;
+
+       return 0;
+}
+
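Tying the checks above together: the action parser accepts only the default hash function, a zero encapsulation level, a bounded key length, and a contiguous queue list. A minimal sketch of an RSS action that satisfies them; the names, queue list and hash types are illustrative:

	#include <rte_common.h>   /* RTE_DIM */
	#include <rte_ethdev.h>   /* ETH_RSS_* */
	#include <rte_flow.h>

	static const uint16_t rss_queues_ex[] = { 0, 1, 2, 3 }; /* contiguous */
	static const struct rte_flow_action_rss rss_conf_ex = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* only DEFAULT accepted */
		.level = 0,                          /* nonzero levels rejected */
		.types = ETH_RSS_NONFRAG_IPV4_TCP,
		.queue_num = RTE_DIM(rss_queues_ex),
		.queue = rss_queues_ex,
	};
	static const struct rte_flow_action rss_actions_ex[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf_ex },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};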
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       union i40e_filter_t *filter,
+                       struct rte_flow_error *error)
+{
+       int ret;
+       struct i40e_queue_regions info;
+       uint8_t action_flag = 0;
+
+       memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+       ret = i40e_flow_parse_rss_pattern(dev, pattern,
+                                       error, &action_flag, &info);
+       if (ret)
+               return ret;
+
+       ret = i40e_flow_parse_rss_action(dev, actions, error,
+                                       action_flag, &info, filter);
+       if (ret)
+               return ret;
+
+       ret = i40e_flow_parse_attr(attr, error);
+       if (ret)
+               return ret;
+
+       cons_filter_type = RTE_ETH_FILTER_HASH;
+
+       return 0;
+}
+
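End to end, an application would exercise this parser through the generic flow API. A hedged sketch, reusing the hypothetical rss_actions_ex from the previous example; the empty pattern (END only) selects the plain-RSS case described earlier:

	#include <rte_flow.h>

	static const struct rte_flow_item rss_pattern_ex[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END }, /* "none" pattern: plain RSS */
	};

	static struct rte_flow *
	setup_rss_flow(uint16_t port_id)
	{
		const struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_error err;

		if (rte_flow_validate(port_id, &attr, rss_pattern_ex,
				      rss_actions_ex, &err) != 0)
			return NULL;
		return rte_flow_create(port_id, &attr, rss_pattern_ex,
				       rss_actions_ex, &err);
	}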
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+               struct i40e_rte_flow_rss_conf *conf)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       if (conf->queue_region_conf) {
+               ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+               conf->queue_region_conf = 0;
+       } else {
+               ret = i40e_config_rss_filter(pf, conf, 1);
+       }
+       return ret;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+               struct i40e_rte_flow_rss_conf *conf)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+       i40e_config_rss_filter(pf, conf, 0);
+       return 0;
+}
+
 static int
 i40e_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
@@ -3798,6 +4589,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
 
        memset(&cons_filter, 0, sizeof(cons_filter));
 
+       /* Find the first non-void action */
+       while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+               i++;
+
+       if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+               ret = i40e_parse_rss_filter(dev, attr, pattern,
+                                       actions, &cons_filter, error);
+               return ret;
+       }
+
+       i = 0;
        /* Get the non-void item number of pattern */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -3870,7 +4672,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
                                        i40e_ethertype_filter_list);
                break;
        case RTE_ETH_FILTER_FDIR:
-               ret = i40e_add_del_fdir_filter(dev,
+               ret = i40e_flow_add_del_fdir_filter(dev,
                                       &cons_filter.fdir_filter, 1);
                if (ret)
                        goto free_flow;
@@ -3885,6 +4687,13 @@ i40e_flow_create(struct rte_eth_dev *dev,
                flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
                                        i40e_tunnel_filter_list);
                break;
+       case RTE_ETH_FILTER_HASH:
+               ret = i40e_config_rss_filter_set(dev,
+                           &cons_filter.rss_conf);
+               if (ret)
+                       goto free_flow;
+               flow->rule = &pf->rss_info;
+               break;
        default:
                goto free_flow;
        }
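Note that for RTE_ETH_FILTER_HASH the stored rule pointer is the PF-wide pf->rss_info rather than a per-rule list node, matching the one-active-rule limit enforced in the action parser. A small illustrative teardown of a flow handle returned by this path (hypothetical helper name):

	#include <stdio.h>
	#include <rte_flow.h>

	static void
	teardown_flow(uint16_t port_id, struct rte_flow *flow)
	{
		struct rte_flow_error err;

		if (flow != NULL && rte_flow_destroy(port_id, flow, &err) != 0)
			printf("flow destroy failed: %s\n",
			       err.message ? err.message : "(no message)");
	}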
@@ -3920,8 +4729,19 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
                              (struct i40e_tunnel_filter *)flow->rule);
                break;
        case RTE_ETH_FILTER_FDIR:
-               ret = i40e_add_del_fdir_filter(dev,
+               ret = i40e_flow_add_del_fdir_filter(dev,
                       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+
+               /* If the last flow is destroyed, disable fdir. */
+               if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+                       i40e_fdir_teardown(pf);
+                       dev->data->dev_conf.fdir_conf.mode =
+                                  RTE_FDIR_MODE_NONE;
+               }
+               break;
+       case RTE_ETH_FILTER_HASH:
+               ret = i40e_config_rss_filter_del(dev,
+                          (struct i40e_rte_flow_rss_conf *)flow->rule);
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
@@ -3983,17 +4803,17 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_vsi *vsi;
        struct i40e_pf_vf *vf;
-       struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+       struct i40e_aqc_cloud_filters_element_bb cld_filter;
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        struct i40e_tunnel_filter *node;
        bool big_buffer = 0;
        int ret = 0;
 
        memset(&cld_filter, 0, sizeof(cld_filter));
-       ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
-                       (struct ether_addr *)&cld_filter.element.outer_mac);
-       ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
-                       (struct ether_addr *)&cld_filter.element.inner_mac);
+       ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
+                       (struct rte_ether_addr *)&cld_filter.element.outer_mac);
+       ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
+                       (struct rte_ether_addr *)&cld_filter.element.inner_mac);
        cld_filter.element.inner_vlan = filter->input.inner_vlan;
        cld_filter.element.flags = filter->input.flags;
        cld_filter.element.tenant_id = filter->input.tenant_id;
@@ -4009,20 +4829,20 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                vsi = vf->vsi;
        }
 
-       if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
-           I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
-           ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
-           I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
-           ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
-           I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+       if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+           I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+           ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+           I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+           ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+           I40E_AQC_ADD_CLOUD_FILTER_0X10))
                big_buffer = 1;
 
        if (big_buffer)
-               ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
-                                                             &cld_filter, 1);
+               ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
+                                               &cld_filter, 1);
        else
-               ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
-                                                  &cld_filter.element, 1);
+               ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+                                               &cld_filter.element, 1);
        if (ret < 0)
                return -ENOTSUP;
 
@@ -4065,6 +4885,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
                return -rte_errno;
        }
 
+       ret = i40e_flow_flush_rss_filter(dev);
+       if (ret) {
+               rte_flow_error_set(error, -ret,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to flush rss flows.");
+               return -rte_errno;
+       }
+
        return ret;
 }
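With this addition, a single flush call from the application clears ethertype, flow director, tunnel and RSS rules alike. A minimal sketch (hypothetical wrapper name):

	#include <rte_flow.h>

	/* Drop every flow rule on the port, including the RSS rule. */
	static int
	flush_all_flows(uint16_t port_id)
	{
		struct rte_flow_error err;

		return rte_flow_flush(port_id, &err);
	}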
 
@@ -4074,6 +4902,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        struct i40e_fdir_filter *fdir_filter;
+       enum i40e_filter_pctype pctype;
        struct rte_flow *flow;
        void *temp;
        int ret;
@@ -4095,8 +4924,14 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
                                rte_free(flow);
                        }
                }
+
+               for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+                    pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+                       pf->fdir.inset_flag[pctype] = 0;
        }
 
+       i40e_fdir_teardown(pf);
+
        return ret;
 }
 
@@ -4155,3 +4990,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
 
        return ret;
 }
+
+/* Remove the RSS filter. */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int32_t ret = -EINVAL;
+
+       ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+       if (rss_info->conf.queue_num)
+               ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+       return ret;
+}