ethdev: add namespace
[dpdk.git] / drivers / net / qede / qede_filter.c
index b8460a0..4404404 100644
@@ -8,6 +8,7 @@
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_errno.h>
+#include <rte_flow_driver.h>
 
 #include "qede_ethdev.h"
 
@@ -19,97 +20,97 @@ const struct _qede_udp_tunn_types {
        const char *string;
 } qede_tunn_types[] = {
        {
-               ETH_TUNNEL_FILTER_OMAC,
+               RTE_ETH_TUNNEL_FILTER_OMAC,
                ECORE_FILTER_MAC,
                ECORE_TUNN_CLSS_MAC_VLAN,
                "outer-mac"
        },
        {
-               ETH_TUNNEL_FILTER_TENID,
+               RTE_ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_VNI,
                ECORE_TUNN_CLSS_MAC_VNI,
                "vni"
        },
        {
-               ETH_TUNNEL_FILTER_IMAC,
+               RTE_ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac"
        },
        {
-               ETH_TUNNEL_FILTER_IVLAN,
+               RTE_ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_VLAN,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-vlan"
        },
        {
-               ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+               RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID,
                ECORE_FILTER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_MAC_VNI,
                "outer-mac and vni"
        },
        {
-               ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+               RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-mac"
        },
        {
-               ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+               RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-mac and inner-vlan"
        },
        {
-               ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+               RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IMAC,
                ECORE_FILTER_INNER_MAC_VNI_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VNI,
                "vni and inner-mac",
        },
        {
-               ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+               RTE_ETH_TUNNEL_FILTER_TENID | RTE_ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "vni and inner-vlan",
        },
        {
-               ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+               RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
                ECORE_FILTER_INNER_PAIR,
                ECORE_TUNN_CLSS_INNER_MAC_VLAN,
                "inner-mac and inner-vlan",
        },
        {
-               ETH_TUNNEL_FILTER_OIP,
+               RTE_ETH_TUNNEL_FILTER_OIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "outer-IP"
        },
        {
-               ETH_TUNNEL_FILTER_IIP,
+               RTE_ETH_TUNNEL_FILTER_IIP,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "inner-IP"
        },
        {
-               RTE_TUNNEL_FILTER_IMAC_IVLAN,
+               RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN"
        },
        {
-               RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+               RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_IVLAN_TENID"
        },
        {
-               RTE_TUNNEL_FILTER_IMAC_TENID,
+               RTE_ETH_TUNNEL_FILTER_IMAC_TENID,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "IMAC_TENID"
        },
        {
-               RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+               RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC,
                ECORE_FILTER_UNUSED,
                MAX_ECORE_TUNN_CLSS,
                "OMAC_TENID_IMAC"
@@ -121,7 +122,7 @@ const struct _qede_udp_tunn_types {
 #define QEDE_FDIR_IP_DEFAULT_VERSION_IHL       (IP_VERSION | IP_HDRLEN)
 #define QEDE_FDIR_TCP_DEFAULT_DATAOFF          (0x50)
 #define QEDE_FDIR_IPV4_DEF_TTL                 (64)
-
+#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW                (0x60000000)
 /* Sum of length of header types of L2, L3, L4.
  * L2 : ether_hdr + vlan_hdr + vxlan_hdr
  * L3 : ipv6_hdr
@@ -129,17 +130,11 @@ const struct _qede_udp_tunn_types {
  */
 #define QEDE_MAX_FDIR_PKT_LEN                  (86)
 
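As a sanity check on the two constants above: the vtc_flow default is just the
IPv6 version number 6 in the top nibble of the 32-bit version/traffic-class/
flow-label word, and the 86-byte budget is the sum of the header sizes listed
in the comment. A standalone sketch using the public rte_* header types
(illustrative, not part of the patch):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_vxlan.h>

/* vtc_flow = version (4 bits) | traffic class (8 bits) | flow label (20 bits),
 * so version 6 with everything else zero is 6 << 28 == 0x60000000.
 */
_Static_assert((6u << 28) == 0x60000000, "default IPv6 vtc_flow");

/* L2 (ether 14 + vlan 4 + vxlan 8) + L3 (ipv6 40) + L4 (tcp 20) == 86,
 * i.e. QEDE_MAX_FDIR_PKT_LEN above.
 */
_Static_assert(sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr) +
	       sizeof(struct rte_vxlan_hdr) + sizeof(struct rte_ipv6_hdr) +
	       sizeof(struct rte_tcp_hdr) == 86, "max flowdir packet length");
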
-#ifndef IPV6_ADDR_LEN
-#define IPV6_ADDR_LEN                          (16)
-#endif
-
-static inline bool qede_valid_flow(uint16_t flow_type)
-{
-       return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
-                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
-                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
-                (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
-}
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+                       struct qede_arfs_entry *arfs,
+                       void *buff,
+                       struct ecore_arfs_config_params *params);
 
 /* Note: Flowdir support is only partial.
  * For example, drop_queue, FDIR masks and flex_conf are not supported.
@@ -149,22 +144,24 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+       struct rte_eth_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
 
        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
-               qdev->fdir_info.arfs.arfs_enable = false;
+               qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                DP_INFO(edev, "flowdir is disabled\n");
        break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
-                       qdev->fdir_info.arfs.arfs_enable = false;
+                       qdev->arfs_info.arfs.mode =
+                               ECORE_FILTER_CONFIG_MODE_DISABLE;
                        return -ENOTSUP;
                }
-               qdev->fdir_info.arfs.arfs_enable = true;
-               DP_INFO(edev, "flowdir is enabled\n");
+               qdev->arfs_info.arfs.mode =
+                               ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+               DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
        break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
@@ -179,29 +176,29 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct qede_fdir_entry *tmp = NULL;
+       struct qede_arfs_entry *tmp = NULL;
 
-       SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+       SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                if (tmp) {
                        if (tmp->mz)
                                rte_memzone_free(tmp->mz);
-                       SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
-                                    qede_fdir_entry, list);
+                       SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+                                    qede_arfs_entry, list);
                        rte_free(tmp);
                }
        }
 }
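
One caveat worth illustrating: the loop above frees each node while
SLIST_FOREACH() still dereferences it to fetch the next element, so the
traversal reads freed memory. A minimal sketch of the conventional safe
shape, reusing the structures from this patch (the helper name is made up):

#include <sys/queue.h>

static void qede_arfs_list_purge(struct qede_dev *qdev)
{
	struct qede_arfs_entry *tmp;

	/* Pop from the head so no next pointer is read after free */
	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
		SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
		if (tmp->mz)
			rte_memzone_free(tmp->mz);
		rte_free(tmp);
	}
}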
 
 static int
-qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
-                           struct rte_eth_fdir_filter *fdir_filter,
-                           bool add)
+qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
+                       struct qede_arfs_entry *arfs,
+                       bool add)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_ntuple_filter_params params;
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
-       struct qede_fdir_entry *tmp = NULL;
-       struct qede_fdir_entry *fdir = NULL;
+       struct qede_arfs_entry *tmp = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
@@ -209,153 +206,125 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
        void *pkt;
 
        if (add) {
-               if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+               if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
-               fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
-                                 RTE_CACHE_LINE_SIZE);
-               if (!fdir) {
-                       DP_ERR(edev, "Did not allocate memory for fdir\n");
-                       return -ENOMEM;
-               }
        }
+
        /* soft_id could have been used as memzone string, but soft_id is
         * not currently used so it has no significance.
         */
-       snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+       snprintf(mz_name, sizeof(mz_name), "%lx",
                 (unsigned long)rte_get_timer_cycles());
        mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                         SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
-               rc = -rte_errno;
-               goto err1;
+               return -rte_errno;
        }
 
        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
-       pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
-                                         &qdev->fdir_info.arfs);
+       pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
+                                         &qdev->arfs_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
-               goto err2;
+               goto err1;
        }
+
        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
        if (add) {
-               SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+               SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter exist\n");
-                               rc = 0;
-                               goto err2;
+                               rc = -EEXIST;
+                               goto err1;
                        }
                }
        } else {
-               SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+               SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
-                       goto err2;
+                       goto err1;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
-               if (!qdev->fdir_info.arfs.arfs_enable) {
+               if (qdev->arfs_info.arfs.mode ==
+                       ECORE_FILTER_CONFIG_MODE_DISABLE) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
-                       qdev->fdir_info.arfs.arfs_enable = true;
+                       qdev->arfs_info.arfs.mode =
+                                       ECORE_FILTER_CONFIG_MODE_5_TUPLE;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
-                                         &qdev->fdir_info.arfs);
+                                         &qdev->arfs_info.arfs);
        }
+
+       memset(&params, 0, sizeof(params));
+       params.addr = (dma_addr_t)mz->iova;
+       params.length = pkt_len;
+       params.qid = arfs->rx_queue;
+       params.vport_id = 0;
+       params.b_is_add = add;
+       params.b_is_drop = arfs->is_drop;
+
        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
-                                              (dma_addr_t)mz->iova,
-                                              pkt_len,
-                                              fdir_filter->action.rx_queue,
-                                              0, add);
+                                              &params);
        if (rc == ECORE_SUCCESS) {
                if (add) {
-                       fdir->rx_queue = fdir_filter->action.rx_queue;
-                       fdir->pkt_len = pkt_len;
-                       fdir->mz = mz;
-                       SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
-                                         fdir, list);
-                       qdev->fdir_info.filter_count++;
+                       arfs->pkt_len = pkt_len;
+                       arfs->mz = mz;
+                       SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
+                                         arfs, list);
+                       qdev->arfs_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
-                               qdev->fdir_info.filter_count);
+                               qdev->arfs_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
-                       SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
-                                    qede_fdir_entry, list);
+                       SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+                                    qede_arfs_entry, list);
                        rte_free(tmp); /* free the removed list node */
                        rte_memzone_free(mz); /* free the temporary lookup packet */
-                       qdev->fdir_info.filter_count--;
+                       qdev->arfs_info.filter_count--;
                        DP_INFO(edev, "Fdir filter deleted, count = %d\n",
-                               qdev->fdir_info.filter_count);
+                               qdev->arfs_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
-                      rc, qdev->fdir_info.filter_count);
+                      rc, qdev->arfs_info.filter_count);
        }
 
        /* Disable ARFS searcher if there are no more filters */
-       if (qdev->fdir_info.filter_count == 0) {
-               memset(&qdev->fdir_info.arfs, 0,
+       if (qdev->arfs_info.filter_count == 0) {
+               memset(&qdev->arfs_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
-               qdev->fdir_info.arfs.arfs_enable = false;
+               qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
-                                         &qdev->fdir_info.arfs);
+                                         &qdev->arfs_info.arfs);
        }
        return 0;
 
-err2:
-       rte_memzone_free(mz);
 err1:
-       if (add)
-               rte_free(fdir);
+       rte_memzone_free(mz);
        return rc;
 }
 
-static int
-qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
-                    struct rte_eth_fdir_filter *fdir,
-                    bool add)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
-       if (!qede_valid_flow(fdir->input.flow_type)) {
-               DP_ERR(edev, "invalid flow_type input\n");
-               return -EINVAL;
-       }
-
-       if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
-               DP_ERR(edev, "invalid queue number %u\n",
-                      fdir->action.rx_queue);
-               return -EINVAL;
-       }
-
-       if (fdir->input.flow_ext.is_vf) {
-               DP_ERR(edev, "flowdir is not supported over VF\n");
-               return -EINVAL;
-       }
-
-       return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
-}
-
 /* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
-uint16_t
-qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
-                       struct rte_eth_fdir_filter *fdir,
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+                       struct qede_arfs_entry *arfs,
                        void *buff,
                        struct ecore_arfs_config_params *params)
 
@@ -364,218 +333,94 @@ qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
-       struct rte_eth_fdir_input *input;
-       static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
-       struct ipv4_hdr *ip;
-       struct ipv6_hdr *ip6;
-       struct udp_hdr *udp;
-       struct tcp_hdr *tcp;
+       struct rte_ipv4_hdr *ip;
+       struct rte_ipv6_hdr *ip6;
+       struct rte_udp_hdr *udp;
+       struct rte_tcp_hdr *tcp;
        uint16_t len;
-       static const uint8_t next_proto[] = {
-               [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
-               [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
-               [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
-               [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
-       };
+
        raw_pkt = (uint8_t *)buff;
-       input = &fdir->input;
-       DP_INFO(edev, "flow_type %d\n", input->flow_type);
-
-       len =  2 * sizeof(struct ether_addr);
-       raw_pkt += 2 * sizeof(struct ether_addr);
-       if (input->flow_ext.vlan_tci) {
-               DP_INFO(edev, "adding VLAN header\n");
-               rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
-               rte_memcpy(raw_pkt + sizeof(uint16_t),
-                          &input->flow_ext.vlan_tci,
-                          sizeof(uint16_t));
-               raw_pkt += sizeof(vlan_frame);
-               len += sizeof(vlan_frame);
-       }
+
+       len =  2 * sizeof(struct rte_ether_addr);
+       raw_pkt += 2 * sizeof(struct rte_ether_addr);
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);
 
-       switch (input->flow_type) {
-       case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-       case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-               /* fill the common ip header */
-               ip = (struct ipv4_hdr *)raw_pkt;
-               *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+       *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
+       switch (arfs->tuple.eth_proto) {
+       case RTE_ETHER_TYPE_IPV4:
+               ip = (struct rte_ipv4_hdr *)raw_pkt;
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
-               ip->total_length = sizeof(struct ipv4_hdr);
-               ip->next_proto_id = input->flow.ip4_flow.proto ?
-                                   input->flow.ip4_flow.proto :
-                                   next_proto[input->flow_type];
-               ip->time_to_live = input->flow.ip4_flow.ttl ?
-                                  input->flow.ip4_flow.ttl :
-                                  QEDE_FDIR_IPV4_DEF_TTL;
-               ip->type_of_service = input->flow.ip4_flow.tos;
-               ip->dst_addr = input->flow.ip4_flow.dst_ip;
-               ip->src_addr = input->flow.ip4_flow.src_ip;
-               len += sizeof(struct ipv4_hdr);
+               ip->total_length = sizeof(struct rte_ipv4_hdr);
+               ip->next_proto_id = arfs->tuple.ip_proto;
+               ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
+               ip->dst_addr = arfs->tuple.dst_ipv4;
+               ip->src_addr = arfs->tuple.src_ipv4;
+               len += sizeof(struct rte_ipv4_hdr);
                params->ipv4 = true;
 
                raw_pkt = (uint8_t *)buff;
                /* UDP */
-               if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
-                       udp = (struct udp_hdr *)(raw_pkt + len);
-                       udp->dst_port = input->flow.udp4_flow.dst_port;
-                       udp->src_port = input->flow.udp4_flow.src_port;
-                       udp->dgram_len = sizeof(struct udp_hdr);
-                       len += sizeof(struct udp_hdr);
+               if (arfs->tuple.ip_proto == IPPROTO_UDP) {
+                       udp = (struct rte_udp_hdr *)(raw_pkt + len);
+                       udp->dst_port = arfs->tuple.dst_port;
+                       udp->src_port = arfs->tuple.src_port;
+                       udp->dgram_len = sizeof(struct rte_udp_hdr);
+                       len += sizeof(struct rte_udp_hdr);
                        /* adjust ip total_length */
-                       ip->total_length += sizeof(struct udp_hdr);
+                       ip->total_length += sizeof(struct rte_udp_hdr);
                        params->udp = true;
                } else { /* TCP */
-                       tcp = (struct tcp_hdr *)(raw_pkt + len);
-                       tcp->src_port = input->flow.tcp4_flow.src_port;
-                       tcp->dst_port = input->flow.tcp4_flow.dst_port;
+                       tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+                       tcp->src_port = arfs->tuple.src_port;
+                       tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
-                       len += sizeof(struct tcp_hdr);
+                       len += sizeof(struct rte_tcp_hdr);
                        /* adjust ip total_length */
-                       ip->total_length += sizeof(struct tcp_hdr);
+                       ip->total_length += sizeof(struct rte_tcp_hdr);
                        params->tcp = true;
                }
                break;
-       case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-       case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-               ip6 = (struct ipv6_hdr *)raw_pkt;
-               *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
-               ip6->proto = input->flow.ipv6_flow.proto ?
-                                       input->flow.ipv6_flow.proto :
-                                       next_proto[input->flow_type];
-               rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
+       case RTE_ETHER_TYPE_IPV6:
+               ip6 = (struct rte_ipv6_hdr *)raw_pkt;
+               ip6->proto = arfs->tuple.ip_proto;
+               ip6->vtc_flow =
+                       rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
+
+               rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
                           IPV6_ADDR_LEN);
-               rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
+               rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
                           IPV6_ADDR_LEN);
-               len += sizeof(struct ipv6_hdr);
+               len += sizeof(struct rte_ipv6_hdr);
+               params->ipv6 = true;
 
                raw_pkt = (uint8_t *)buff;
                /* UDP */
-               if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
-                       udp = (struct udp_hdr *)(raw_pkt + len);
-                       udp->src_port = input->flow.udp6_flow.dst_port;
-                       udp->dst_port = input->flow.udp6_flow.src_port;
-                       len += sizeof(struct udp_hdr);
+               if (arfs->tuple.ip_proto == IPPROTO_UDP) {
+                       udp = (struct rte_udp_hdr *)(raw_pkt + len);
+                       udp->src_port = arfs->tuple.src_port;
+                       udp->dst_port = arfs->tuple.dst_port;
+                       len += sizeof(struct rte_udp_hdr);
                        params->udp = true;
                } else { /* TCP */
-                       tcp = (struct tcp_hdr *)(raw_pkt + len);
-                       tcp->src_port = input->flow.tcp4_flow.src_port;
-                       tcp->dst_port = input->flow.tcp4_flow.dst_port;
+                       tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+                       tcp->src_port = arfs->tuple.src_port;
+                       tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
-                       len += sizeof(struct tcp_hdr);
+                       len += sizeof(struct rte_tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
-               DP_ERR(edev, "Unsupported flow_type %u\n",
-                      input->flow_type);
+               DP_ERR(edev, "Unsupported eth_proto %u\n",
+                      arfs->tuple.eth_proto);
                return 0;
        }
 
        return len;
 }
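
For orientation, a sketch of the template frame this function produces in the
IPv4/UDP case (MAC addresses stay zero from the earlier memset; the aRFS
searcher matches on the 5-tuple, not on L2):

/*
 *   offset  0: dst MAC + src MAC    12 bytes, all zero
 *   offset 12: ethertype             2 bytes, 0x0800 (RTE_ETHER_TYPE_IPV4)
 *   offset 14: struct rte_ipv4_hdr  20 bytes, proto/TTL/src/dst filled in
 *   offset 34: struct rte_udp_hdr    8 bytes, src/dst ports filled in
 *
 * pkt_len = 42 is returned to the caller.
 */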
 
-static int
-qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
-                     enum rte_filter_op filter_op,
-                     void *arg)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_fdir_filter *fdir;
-       int ret;
-
-       fdir = (struct rte_eth_fdir_filter *)arg;
-       switch (filter_op) {
-       case RTE_ETH_FILTER_NOP:
-               /* Typically used to query flowdir support */
-               if (ECORE_IS_CMT(edev)) {
-                       DP_ERR(edev, "flowdir is not supported in 100G mode\n");
-                       return -ENOTSUP;
-               }
-               return 0; /* means supported */
-       case RTE_ETH_FILTER_ADD:
-               ret = qede_fdir_filter_add(eth_dev, fdir, 1);
-       break;
-       case RTE_ETH_FILTER_DELETE:
-               ret = qede_fdir_filter_add(eth_dev, fdir, 0);
-       break;
-       case RTE_ETH_FILTER_FLUSH:
-       case RTE_ETH_FILTER_UPDATE:
-       case RTE_ETH_FILTER_INFO:
-               return -ENOTSUP;
-       break;
-       default:
-               DP_ERR(edev, "unknown operation %u", filter_op);
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
-int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
-                           enum rte_filter_op filter_op,
-                           void *arg)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_ntuple_filter *ntuple;
-       struct rte_eth_fdir_filter fdir_entry;
-       struct rte_eth_tcpv4_flow *tcpv4_flow;
-       struct rte_eth_udpv4_flow *udpv4_flow;
-       bool add = false;
-
-       switch (filter_op) {
-       case RTE_ETH_FILTER_NOP:
-               /* Typically used to query fdir support */
-               if (ECORE_IS_CMT(edev)) {
-                       DP_ERR(edev, "flowdir is not supported in 100G mode\n");
-                       return -ENOTSUP;
-               }
-               return 0; /* means supported */
-       case RTE_ETH_FILTER_ADD:
-               add = true;
-       break;
-       case RTE_ETH_FILTER_DELETE:
-       break;
-       case RTE_ETH_FILTER_INFO:
-       case RTE_ETH_FILTER_GET:
-       case RTE_ETH_FILTER_UPDATE:
-       case RTE_ETH_FILTER_FLUSH:
-       case RTE_ETH_FILTER_SET:
-       case RTE_ETH_FILTER_STATS:
-       case RTE_ETH_FILTER_OP_MAX:
-               DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
-               return -ENOTSUP;
-       }
-       ntuple = (struct rte_eth_ntuple_filter *)arg;
-       /* Internally convert ntuple to fdir entry */
-       memset(&fdir_entry, 0, sizeof(fdir_entry));
-       if (ntuple->proto == IPPROTO_TCP) {
-               fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
-               tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
-               tcpv4_flow->ip.src_ip = ntuple->src_ip;
-               tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
-               tcpv4_flow->ip.proto = IPPROTO_TCP;
-               tcpv4_flow->src_port = ntuple->src_port;
-               tcpv4_flow->dst_port = ntuple->dst_port;
-       } else {
-               fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
-               udpv4_flow = &fdir_entry.input.flow.udp4_flow;
-               udpv4_flow->ip.src_ip = ntuple->src_ip;
-               udpv4_flow->ip.dst_ip = ntuple->dst_ip;
-               udpv4_flow->ip.proto = IPPROTO_TCP;
-               udpv4_flow->src_port = ntuple->src_port;
-               udpv4_flow->dst_port = ntuple->dst_port;
-       }
-
-       fdir_entry.action.rx_queue = ntuple->queue;
-
-       return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
-}
-
 static int
 qede_tunnel_update(struct qede_dev *qdev,
                   struct ecore_tunnel_info *tunn_info)
@@ -682,36 +527,6 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
        return rc;
 }
 
-static int
-qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
-                 bool enable)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       enum _ecore_status_t rc = ECORE_INVAL;
-       struct ecore_tunnel_info tunn;
-
-       memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
-       tunn.ip_gre.b_update_mode = true;
-       tunn.ip_gre.b_mode_enabled = enable;
-       tunn.ip_gre.tun_cls = clss;
-       tunn.ip_gre.tun_cls = clss;
-       tunn.b_update_rx_cls = true;
-       tunn.b_update_tx_cls = true;
-
-       rc = qede_tunnel_update(qdev, &tunn);
-       if (rc == ECORE_SUCCESS) {
-               qdev->ipgre.enable = enable;
-               DP_INFO(edev, "IPGRE is %s\n",
-                       enable ? "enabled" : "disabled");
-       } else {
-               DP_ERR(edev, "Failed to update tunn_clss %u\n",
-                      clss);
-       }
-
-       return rc;
-}
-
 int
 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                      struct rte_eth_udp_tunnel *tunnel_udp)
@@ -727,7 +542,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
        memset(&tunn, 0, sizeof(tunn));
 
        switch (tunnel_udp->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
@@ -755,7 +570,7 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);
 
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
                                tunnel_udp->udp_port);
@@ -807,7 +622,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
        memset(&tunn, 0, sizeof(tunn));
 
        switch (tunnel_udp->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for VXLAN was already configured\n",
@@ -844,7 +659,7 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
                qdev->vxlan.udp_port = udp_port;
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
                                "UDP port %u for GENEVE was already configured\n",
@@ -888,260 +703,365 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
        return 0;
 }
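
Both of these handlers back the generic ethdev UDP tunnel port API, so
applications never call them directly. A minimal caller sketch (the helper
name and the use of the IANA VXLAN port are illustrative):

#include <rte_ethdev.h>

static int register_vxlan_port(uint16_t port_id)
{
	/* On qede this lands in qede_udp_dst_port_add() */
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,	/* IANA-assigned VXLAN port */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}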
 
-static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
-                                      uint32_t *clss, char *str)
+static int
+qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       struct rte_flow_error *error)
 {
-       uint16_t j;
-       *clss = MAX_ECORE_TUNN_CLSS;
-
-       for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
-               if (filter == qede_tunn_types[j].rte_filter_type) {
-                       *type = qede_tunn_types[j].qede_type;
-                       *clss = qede_tunn_types[j].qede_tunn_clss;
-                       strcpy(str, qede_tunn_types[j].string);
-                       return;
-               }
+       if (attr == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+                                  "NULL attribute");
+               return -rte_errno;
+       }
+
+       if (attr->group != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+                                  "Groups are not supported");
+               return -rte_errno;
        }
+
+       if (attr->priority != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+                                  "Priorities are not supported");
+               return -rte_errno;
+       }
+
+       if (attr->egress != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+                                  "Egress is not supported");
+               return -rte_errno;
+       }
+
+       if (attr->transfer != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+                                  "Transfer is not supported");
+               return -rte_errno;
+       }
+
+       if (attr->ingress == 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+                                  "Only ingress is supported");
+               return -rte_errno;
+       }
+
+       return 0;
 }
 
 static int
-qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
-                             const struct rte_eth_tunnel_filter_conf *conf,
-                             uint32_t type)
+qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
+                       const struct rte_flow_item pattern[],
+                       struct rte_flow_error *error,
+                       struct rte_flow *flow)
 {
-       /* Init commmon ucast params first */
-       qede_set_ucast_cmn_params(ucast);
+       bool l3 = false, l4 = false;
 
-       /* Copy out the required fields based on classification type */
-       ucast->type = type;
+       if (pattern == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+                                  "NULL pattern");
+               return -rte_errno;
+       }
 
-       switch (type) {
-       case ECORE_FILTER_VNI:
-               ucast->vni = conf->tenant_id;
-       break;
-       case ECORE_FILTER_INNER_VLAN:
-               ucast->vlan = conf->inner_vlan;
-       break;
-       case ECORE_FILTER_MAC:
-               memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-                      ETHER_ADDR_LEN);
-       break;
-       case ECORE_FILTER_INNER_MAC:
-               memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                      ETHER_ADDR_LEN);
-       break;
-       case ECORE_FILTER_MAC_VNI_PAIR:
-               memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
-               ucast->vni = conf->tenant_id;
-       break;
-       case ECORE_FILTER_INNER_MAC_VNI_PAIR:
-               memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
-               ucast->vni = conf->tenant_id;
-       break;
-       case ECORE_FILTER_INNER_PAIR:
-               memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
-               ucast->vlan = conf->inner_vlan;
-       break;
-       default:
-               return -EINVAL;
+       for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+               if (!pattern->spec) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item spec not defined");
+                       return -rte_errno;
+               }
+
+               if (pattern->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item last not supported");
+                       return -rte_errno;
+               }
+
+               if (pattern->mask) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item mask not supported");
+                       return -rte_errno;
+               }
+
+               /* The validation below covers only the 4-tuple flow
+                * (GFT_PROFILE_TYPE_4_TUPLE):
+                * - src and dst L3 address (IPv4 or IPv6)
+                * - src and dst L4 port (TCP or UDP)
+                */
+
+               switch (pattern->type) {
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       l3 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_ipv4 *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+                               flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+                               flow->entry.tuple.eth_proto =
+                                       RTE_ETHER_TYPE_IPV4;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       l3 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_ipv6 *spec;
+
+                               spec = pattern->spec;
+                               rte_memcpy(flow->entry.tuple.src_ipv6,
+                                          spec->hdr.src_addr,
+                                          IPV6_ADDR_LEN);
+                               rte_memcpy(flow->entry.tuple.dst_ipv6,
+                                          spec->hdr.dst_addr,
+                                          IPV6_ADDR_LEN);
+                               flow->entry.tuple.eth_proto =
+                                       RTE_ETHER_TYPE_IPV6;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       l4 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_udp *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_port =
+                                               spec->hdr.src_port;
+                               flow->entry.tuple.dst_port =
+                                               spec->hdr.dst_port;
+                               flow->entry.tuple.ip_proto = IPPROTO_UDP;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       l4 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_tcp *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_port =
+                                               spec->hdr.src_port;
+                               flow->entry.tuple.dst_port =
+                                               spec->hdr.dst_port;
+                               flow->entry.tuple.ip_proto = IPPROTO_TCP;
+                       }
+
+                       break;
+               default:
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+                       return -rte_errno;
+               }
+       }
+
+       if (!(l3 && l4)) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  pattern,
+                                  "Item types need to have both L3 and L4 protocols");
+               return -rte_errno;
        }
 
-       return ECORE_SUCCESS;
+       return 0;
 }
 
 static int
-_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
-                        const struct rte_eth_tunnel_filter_conf *conf,
-                        __attribute__((unused)) enum rte_filter_op filter_op,
-                        enum ecore_tunn_clss *clss,
-                        bool add)
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error,
+                       struct rte_flow *flow)
 {
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_filter_ucast ucast = {0};
-       enum ecore_filter_ucast_type type;
-       uint16_t filter_type = 0;
-       char str[80];
-       int rc;
+       const struct rte_flow_action_queue *queue;
 
-       filter_type = conf->filter_type;
-       /* Determine if the given filter classification is supported */
-       qede_get_ecore_tunn_params(filter_type, &type, clss, str);
-       if (*clss == MAX_ECORE_TUNN_CLSS) {
-               DP_ERR(edev, "Unsupported filter type\n");
-               return -EINVAL;
-       }
-       /* Init tunnel ucast params */
-       rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-       if (rc != ECORE_SUCCESS) {
-               DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
-               conf->filter_type);
-               return rc;
+       if (actions == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+                                  "NULL actions");
+               return -rte_errno;
        }
-       DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-               str, filter_op, ucast.type);
 
-       ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       queue = actions->conf;
+
+                       if (queue->index >= QEDE_RSS_COUNT(dev)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ACTION,
+                                                  actions,
+                                                  "Bad QUEUE action");
+                               return -rte_errno;
+                       }
 
-       /* Skip MAC/VLAN if filter is based on VNI */
-       if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
-               rc = qede_mac_int_ops(eth_dev, &ucast, add);
-               if (rc == 0 && add) {
-                       /* Enable accept anyvlan */
-                       qede_config_accept_any_vlan(qdev, true);
+                       if (flow)
+                               flow->entry.rx_queue = queue->index;
+
+                       break;
+               case RTE_FLOW_ACTION_TYPE_DROP:
+                       if (flow)
+                               flow->entry.is_drop = true;
+                       break;
+               default:
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
+                       return -rte_errno;
                }
-       } else {
-               rc = qede_ucast_filter(eth_dev, &ucast, add);
-               if (rc == 0)
-                       rc = ecore_filter_ucast_cmd(edev, &ucast,
-                                           ECORE_SPQ_MODE_CB, NULL);
        }
 
-       return rc;
+       return 0;
 }
 
 static int
-qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
-                enum rte_eth_tunnel_type tunn_type, bool enable)
+qede_flow_parse(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item patterns[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error,
+               struct rte_flow *flow)
+
 {
-       int rc = -EINVAL;
+       int rc = 0;
 
-       switch (tunn_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
-               rc = qede_vxlan_enable(eth_dev, clss, enable);
-               break;
-       case RTE_TUNNEL_TYPE_GENEVE:
-               rc = qede_geneve_enable(eth_dev, clss, enable);
-               break;
-       case RTE_TUNNEL_TYPE_IP_IN_GRE:
-               rc = qede_ipgre_enable(eth_dev, clss, enable);
-               break;
-       default:
-               rc = -EINVAL;
-               break;
-       }
+       rc = qede_flow_validate_attr(dev, attr, error);
+       if (rc)
+               return rc;
+
+       /* Parse and validate the item pattern and actions.
+        * The given item list and actions are translated into the qede
+        * PMD specific arfs structure.
+        */
+       rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+       if (rc)
+               return rc;
+
+       rc = qede_flow_parse_actions(dev, actions, error, flow);
 
        return rc;
 }
 
 static int
-qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
-                       enum rte_filter_op filter_op,
-                       const struct rte_eth_tunnel_filter_conf *conf)
+qede_flow_validate(struct rte_eth_dev *dev,
+                  const struct rte_flow_attr *attr,
+                  const struct rte_flow_item patterns[],
+                  const struct rte_flow_action actions[],
+                  struct rte_flow_error *error)
 {
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
-       bool add;
+       return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+                const struct rte_flow_attr *attr,
+                const struct rte_flow_item pattern[],
+                const struct rte_flow_action actions[],
+                struct rte_flow_error *error)
+{
+       struct rte_flow *flow = NULL;
        int rc;
 
-       PMD_INIT_FUNC_TRACE(edev);
+       flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+       if (flow == NULL) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "Failed to allocate memory");
+               return NULL;
+       }
 
-       switch (filter_op) {
-       case RTE_ETH_FILTER_ADD:
-               add = true;
-               break;
-       case RTE_ETH_FILTER_DELETE:
-               add = false;
-               break;
-       default:
-               DP_ERR(edev, "Unsupported operation %d\n", filter_op);
-               return -EINVAL;
+       rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+       if (rc < 0) {
+               rte_free(flow);
+               return NULL;
        }
 
-       if (IS_VF(edev))
-               return qede_tunn_enable(eth_dev,
-                                       ECORE_TUNN_CLSS_MAC_VLAN,
-                                       conf->tunnel_type, add);
+       rc = qede_config_arfs_filter(dev, &flow->entry, true);
+       if (rc < 0) {
+               rte_flow_error_set(error, rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to configure flow filter");
+               rte_free(flow);
+               return NULL;
+       }
 
-       rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
-       if (rc != ECORE_SUCCESS)
-               return rc;
+       return flow;
+}
 
-       if (add) {
-               if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
-                       qdev->vxlan.num_filters++;
-                       qdev->vxlan.filter_type = conf->filter_type;
-               } else { /* GENEVE */
-                       qdev->geneve.num_filters++;
-                       qdev->geneve.filter_type = conf->filter_type;
-               }
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+                 struct rte_flow *flow,
+                 struct rte_flow_error *error)
+{
+       int rc = 0;
+
+       rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+       if (rc < 0) {
+               rte_flow_error_set(error, rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to delete flow filter");
+       } else {
+               /* Free the handle only once the filter is actually removed;
+                * freeing on failure would hand the caller a dangling
+                * pointer, while keeping it on success would leak it.
+                */
+               rte_free(flow);
+       }
 
-               if (!qdev->vxlan.enable || !qdev->geneve.enable ||
-                   !qdev->ipgre.enable)
-                       return qede_tunn_enable(eth_dev, clss,
-                                               conf->tunnel_type,
-                                               true);
-       } else {
-               if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
-                       qdev->vxlan.num_filters--;
-               else /*GENEVE*/
-                       qdev->geneve.num_filters--;
-
-               /* Disable VXLAN if VXLAN filters become 0 */
-               if (qdev->vxlan.num_filters == 0 ||
-                   qdev->geneve.num_filters == 0)
-                       return qede_tunn_enable(eth_dev, clss,
-                                               conf->tunnel_type,
-                                               false);
+       return rc;
+}
+
+static int
+qede_flow_flush(struct rte_eth_dev *eth_dev,
+               struct rte_flow_error *error)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct qede_arfs_entry *tmp = NULL;
+       int rc = 0;
+
+       while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
+               tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
+
+               rc = qede_config_arfs_filter(eth_dev, tmp, false);
+               if (rc < 0) {
+                       rte_flow_error_set(error, rc,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                          "Failed to flush flow filter");
+                       /* The entry is still on the list; stop instead of
+                        * spinning on the same failure.
+                        */
+                       break;
+               }
        }
 
-       return 0;
+       return rc;
 }
 
-int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
-                        enum rte_filter_type filter_type,
-                        enum rte_filter_op filter_op,
-                        void *arg)
+const struct rte_flow_ops qede_flow_ops = {
+       .validate = qede_flow_validate,
+       .create = qede_flow_create,
+       .destroy = qede_flow_destroy,
+       .flush = qede_flow_flush,
+};
+
+int
+qede_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
+                     const struct rte_flow_ops **ops)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_tunnel_filter_conf *filter_conf =
-                       (struct rte_eth_tunnel_filter_conf *)arg;
-
-       switch (filter_type) {
-       case RTE_ETH_FILTER_TUNNEL:
-               switch (filter_conf->tunnel_type) {
-               case RTE_TUNNEL_TYPE_VXLAN:
-               case RTE_TUNNEL_TYPE_GENEVE:
-               case RTE_TUNNEL_TYPE_IP_IN_GRE:
-                       DP_INFO(edev,
-                               "Packet steering to the specified Rx queue"
-                               " is not supported with UDP tunneling");
-                       return(qede_tunn_filter_config(eth_dev, filter_op,
-                                                     filter_conf));
-               case RTE_TUNNEL_TYPE_TEREDO:
-               case RTE_TUNNEL_TYPE_NVGRE:
-               case RTE_L2_TUNNEL_TYPE_E_TAG:
-                       DP_ERR(edev, "Unsupported tunnel type %d\n",
-                               filter_conf->tunnel_type);
-                       return -EINVAL;
-               case RTE_TUNNEL_TYPE_NONE:
-               default:
-                       return 0;
-               }
-               break;
-       case RTE_ETH_FILTER_FDIR:
-               return qede_fdir_filter_conf(eth_dev, filter_op, arg);
-       case RTE_ETH_FILTER_NTUPLE:
-               return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
-       case RTE_ETH_FILTER_MACVLAN:
-       case RTE_ETH_FILTER_ETHERTYPE:
-       case RTE_ETH_FILTER_FLEXIBLE:
-       case RTE_ETH_FILTER_SYN:
-       case RTE_ETH_FILTER_HASH:
-       case RTE_ETH_FILTER_L2_TUNNEL:
-       case RTE_ETH_FILTER_MAX:
-       default:
-               DP_ERR(edev, "Unsupported filter type %d\n",
-                       filter_type);
-               return -EINVAL;
+
+       if (ECORE_IS_CMT(edev)) {
+               DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+               return -ENOTSUP;
        }
 
+       *ops = &qede_flow_ops;
        return 0;
 }
-
-/* RTE_FLOW */
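
With qede_dev_flow_ops_get() exported, applications reach the handlers above
through the generic rte_flow API. A minimal usage sketch, assuming an already
configured and started qede port (addresses, ports and queue index are made
up for illustration):

#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_ip.h>

/* Steer ingress IPv4/UDP 1.2.3.4:1000 -> 5.6.7.8:2000 to Rx queue 1.
 * Note the constraints enforced above: ingress only, group/priority 0,
 * a spec on every item, no masks, and the pattern must carry both an
 * L3 (IPV4/IPV6) and an L4 (UDP/TCP) item.
 */
static struct rte_flow *
steer_udp_flow(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 4)),
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(5, 6, 7, 8)),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = RTE_BE16(1000),
		.hdr.dst_port = RTE_BE16(2000),
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}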