net: add rte prefix to ether structures
[dpdk.git] / drivers / net / qede / qede_filter.c
index 4b709e6..792b9ef 100644
@@ -8,6 +8,7 @@
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_errno.h>
+#include <rte_flow_driver.h>
 
 #include "qede_ethdev.h"
 
@@ -129,10 +130,6 @@ const struct _qede_udp_tunn_types {
  */
 #define QEDE_MAX_FDIR_PKT_LEN                  (86)
 
-#ifndef IPV6_ADDR_LEN
-#define IPV6_ADDR_LEN                          (16)
-#endif
-
 static inline bool qede_valid_flow(uint16_t flow_type)
 {
        return  ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
@@ -141,6 +138,12 @@ static inline bool qede_valid_flow(uint16_t flow_type)
                 (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
 }
 
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+                       struct qede_arfs_entry *arfs,
+                       void *buff,
+                       struct ecore_arfs_config_params *params);
+
 /* Note: Flowdir support is only partial.
  * For ex: drop_queue, FDIR masks, flex_conf are not supported.
  * Parameters like pballoc/status fields are irrelevant here.
@@ -154,17 +157,19 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
        /* check FDIR modes */
        switch (fdir->mode) {
        case RTE_FDIR_MODE_NONE:
-               qdev->fdir_info.arfs.arfs_enable = false;
+               qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                DP_INFO(edev, "flowdir is disabled\n");
        break;
        case RTE_FDIR_MODE_PERFECT:
                if (ECORE_IS_CMT(edev)) {
                        DP_ERR(edev, "flowdir is not supported in 100G mode\n");
-                       qdev->fdir_info.arfs.arfs_enable = false;
+                       qdev->arfs_info.arfs.mode =
+                               ECORE_FILTER_CONFIG_MODE_DISABLE;
                        return -ENOTSUP;
                }
-               qdev->fdir_info.arfs.arfs_enable = true;
-               DP_INFO(edev, "flowdir is enabled\n");
+               qdev->arfs_info.arfs.mode =
+                               ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+               DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
        break;
        case RTE_FDIR_MODE_PERFECT_TUNNEL:
        case RTE_FDIR_MODE_SIGNATURE:
@@ -179,29 +184,96 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
 void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct qede_fdir_entry *tmp = NULL;
+       struct qede_arfs_entry *tmp = NULL;
 
-       SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
-               if (tmp) {
-                       if (tmp->mz)
-                               rte_memzone_free(tmp->mz);
-                       SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
-                                    qede_fdir_entry, list);
-                       rte_free(tmp);
-               }
-       }
+       /* SLIST_FOREACH() advances through a node after the loop body has
+        * freed it; pop entries from the head instead to avoid the
+        * use-after-free.
+        */
+       while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
+               tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
+               if (tmp->mz)
+                       rte_memzone_free(tmp->mz);
+               SLIST_REMOVE_HEAD(&qdev->arfs_info.arfs_list_head, list);
+               rte_free(tmp);
+       }
 }
 
 static int
-qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
-                           struct rte_eth_fdir_filter *fdir_filter,
-                           bool add)
+qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
+                        struct rte_eth_fdir_filter *fdir,
+                        struct qede_arfs_entry *arfs)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct rte_eth_fdir_input *input;
+
+       static const uint8_t next_proto[] = {
+               [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+               [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+               [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+               [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+       };
+
+       input = &fdir->input;
+
+       DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+       switch (input->flow_type) {
+       case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+       case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+               /* fill the common ip header */
+               arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+               arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
+               arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
+               arfs->tuple.ip_proto = next_proto[input->flow_type];
+
+               /* UDP */
+               if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+                       arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
+                       arfs->tuple.src_port = input->flow.udp4_flow.src_port;
+               } else { /* TCP */
+                       arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
+                       arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
+               }
+               break;
+       case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+       case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+               arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+               arfs->tuple.ip_proto = next_proto[input->flow_type];
+               rte_memcpy(arfs->tuple.dst_ipv6,
+                          &input->flow.ipv6_flow.dst_ip,
+                          IPV6_ADDR_LEN);
+               rte_memcpy(arfs->tuple.src_ipv6,
+                          &input->flow.ipv6_flow.src_ip,
+                          IPV6_ADDR_LEN);
+
+               /* UDP */
+               if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+                       arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
+                       arfs->tuple.src_port = input->flow.udp6_flow.src_port;
+               } else { /* TCP */
+                       arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
+                       arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
+               }
+               break;
+       default:
+               DP_ERR(edev, "Unsupported flow_type %u\n",
+                      input->flow_type);
+               return -ENOTSUP;
+       }
+
+       arfs->rx_queue = fdir->action.rx_queue;
+       return 0;
+}
+
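For reference, this is the shape of the translation the new helper performs. A sketch with made-up addresses and ports (none of these values come from the patch):

```c
#include <rte_ethdev.h>
#include <rte_byteorder.h>

/* Illustrative input for qede_fdir_to_arfs_filter(); all values are
 * examples only. */
static struct rte_eth_fdir_filter fdir_example = {
	.input = {
		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
		.flow.udp4_flow = {
			.ip = {
				.src_ip = RTE_BE32(0x0a000001), /* 10.0.0.1 */
				.dst_ip = RTE_BE32(0x0a000002), /* 10.0.0.2 */
			},
			.src_port = RTE_BE16(1024),
			.dst_port = RTE_BE16(4789),
		},
	},
	.action.rx_queue = 1,
};

/* The helper would fill the arfs entry as:
 *   tuple.eth_proto = ETHER_TYPE_IPv4, tuple.ip_proto = IPPROTO_UDP,
 *   tuple.src_ipv4/dst_ipv4 and tuple.src_port/dst_port copied verbatim
 *   (the fdir API already carries them in network byte order),
 *   rx_queue = 1.
 */
```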
+static int
+qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
+                       struct qede_arfs_entry *arfs,
+                       bool add)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
-       struct qede_fdir_entry *tmp = NULL;
-       struct qede_fdir_entry *fdir = NULL;
+       struct qede_arfs_entry *tmp = NULL;
        const struct rte_memzone *mz;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc;
@@ -209,17 +281,12 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
        void *pkt;
 
        if (add) {
-               if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+               if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
                        DP_ERR(edev, "Reached max flowdir filter limit\n");
                        return -EINVAL;
                }
-               fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
-                                 RTE_CACHE_LINE_SIZE);
-               if (!fdir) {
-                       DP_ERR(edev, "Did not allocate memory for fdir\n");
-                       return -ENOMEM;
-               }
        }
+
        /* soft_id could have been used as memzone string, but soft_id is
         * not currently used so it has no significance.
         */
@@ -230,98 +297,124 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
        if (!mz) {
                DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
                       rte_strerror(rte_errno));
-               rc = -rte_errno;
-               goto err1;
+               return -rte_errno;
        }
 
        pkt = mz->addr;
        memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
-       pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
-                                         &qdev->fdir_info.arfs);
+       pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
+                                         &qdev->arfs_info.arfs);
        if (pkt_len == 0) {
                rc = -EINVAL;
-               goto err2;
+               goto err1;
        }
+
        DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
        if (add) {
-               SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+               SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                                DP_INFO(edev, "flowdir filter exists\n");
-                               rc = 0;
-                               goto err2;
+                               rc = -EEXIST;
+                               goto err1;
                        }
                }
        } else {
-               SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+               SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
                        if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                                break;
                }
                if (!tmp) {
                        DP_ERR(edev, "flowdir filter does not exist\n");
                        rc = -ENOENT;
-                       goto err2;
+                       goto err1;
                }
        }
        p_hwfn = ECORE_LEADING_HWFN(edev);
        if (add) {
-               if (!qdev->fdir_info.arfs.arfs_enable) {
+               if (qdev->arfs_info.arfs.mode ==
+                       ECORE_FILTER_CONFIG_MODE_DISABLE) {
                        /* Force update */
                        eth_dev->data->dev_conf.fdir_conf.mode =
                                                RTE_FDIR_MODE_PERFECT;
-                       qdev->fdir_info.arfs.arfs_enable = true;
+                       qdev->arfs_info.arfs.mode =
+                                       ECORE_FILTER_CONFIG_MODE_5_TUPLE;
                        DP_INFO(edev, "Force enable flowdir in perfect mode\n");
                }
                /* Enable ARFS searcher with updated flow_types */
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
-                                         &qdev->fdir_info.arfs);
+                                         &qdev->arfs_info.arfs);
        }
        /* configure filter with ECORE_SPQ_MODE_EBLOCK */
        rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
                                               (dma_addr_t)mz->iova,
                                               pkt_len,
-                                              fdir_filter->action.rx_queue,
+                                              arfs->rx_queue,
                                               0, add);
        if (rc == ECORE_SUCCESS) {
                if (add) {
-                       fdir->rx_queue = fdir_filter->action.rx_queue;
-                       fdir->pkt_len = pkt_len;
-                       fdir->mz = mz;
-                       SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
-                                         fdir, list);
-                       qdev->fdir_info.filter_count++;
+                       arfs->pkt_len = pkt_len;
+                       arfs->mz = mz;
+                       SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
+                                         arfs, list);
+                       qdev->arfs_info.filter_count++;
                        DP_INFO(edev, "flowdir filter added, count = %d\n",
-                               qdev->fdir_info.filter_count);
+                               qdev->arfs_info.filter_count);
                } else {
                        rte_memzone_free(tmp->mz);
-                       SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
-                                    qede_fdir_entry, list);
+                       SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+                                    qede_arfs_entry, list);
                        rte_free(tmp); /* the node deleted */
                        rte_memzone_free(mz); /* temp node allocated */
-                       qdev->fdir_info.filter_count--;
+                       qdev->arfs_info.filter_count--;
                        DP_INFO(edev, "Fdir filter deleted, count = %d\n",
-                               qdev->fdir_info.filter_count);
+                               qdev->arfs_info.filter_count);
                }
        } else {
                DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
-                      rc, qdev->fdir_info.filter_count);
+                      rc, qdev->arfs_info.filter_count);
        }
 
        /* Disable ARFS searcher if there are no more filters */
-       if (qdev->fdir_info.filter_count == 0) {
-               memset(&qdev->fdir_info.arfs, 0,
+       if (qdev->arfs_info.filter_count == 0) {
+               memset(&qdev->arfs_info.arfs, 0,
                       sizeof(struct ecore_arfs_config_params));
                DP_INFO(edev, "Disabling flowdir\n");
-               qdev->fdir_info.arfs.arfs_enable = false;
+               qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
                ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
-                                         &qdev->fdir_info.arfs);
+                                         &qdev->arfs_info.arfs);
        }
        return 0;
 
-err2:
-       rte_memzone_free(mz);
 err1:
-       if (add)
-               rte_free(fdir);
+       rte_memzone_free(mz);
+       return rc;
+}
+
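A note on the duplicate check above: every filter is reduced to a template packet stored in its memzone, so two filters are considered identical exactly when those packets are byte-identical. A minimal sketch of the predicate, assuming the driver's qede_arfs_entry type (the helper name is ours, not the driver's):

```c
#include <string.h>
#include <stdbool.h>
#include "qede_ethdev.h" /* struct qede_arfs_entry */

/* Hypothetical equivalent of the memcmp() checks in
 * qede_config_arfs_filter(): entry e matches a freshly constructed
 * template packet iff the stored packet bytes are identical. */
static bool
qede_arfs_entry_matches(const struct qede_arfs_entry *e,
			const void *pkt, uint16_t pkt_len)
{
	return memcmp(e->mz->addr, pkt, pkt_len) == 0;
}
```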
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+                           struct rte_eth_fdir_filter *fdir_filter,
+                           bool add)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct qede_arfs_entry *arfs = NULL;
+       int rc = 0;
+
+       arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
+                         RTE_CACHE_LINE_SIZE);
+       if (!arfs) {
+               DP_ERR(edev, "Failed to allocate memory for arfs entry\n");
+               return -ENOMEM;
+       }
+
+       rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
+       if (rc < 0) {
+               rte_free(arfs);
+               return rc;
+       }
+
+       rc = qede_config_arfs_filter(eth_dev, arfs, add);
+
+       /* On failure the entry was not inserted into the filter list; on
+        * delete it is only a temporary lookup key. Free it in both cases.
+        */
+       if (rc < 0 || !add)
+               rte_free(arfs);
+
        return rc;
 }
 
@@ -353,9 +446,9 @@ qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
 }
 
 /* Fills the L3/L4 headers and returns the actual length of flowdir packet */
-uint16_t
-qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
-                       struct rte_eth_fdir_filter *fdir,
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+                       struct qede_arfs_entry *arfs,
                        void *buff,
                        struct ecore_arfs_config_params *params)
 
@@ -364,64 +457,39 @@ qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint16_t *ether_type;
        uint8_t *raw_pkt;
-       struct rte_eth_fdir_input *input;
-       static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
        struct ipv4_hdr *ip;
        struct ipv6_hdr *ip6;
        struct udp_hdr *udp;
        struct tcp_hdr *tcp;
        uint16_t len;
-       static const uint8_t next_proto[] = {
-               [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
-               [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
-               [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
-               [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
-       };
+
        raw_pkt = (uint8_t *)buff;
-       input = &fdir->input;
-       DP_INFO(edev, "flow_type %d\n", input->flow_type);
 
-       len =  2 * sizeof(struct ether_addr);
-       raw_pkt += 2 * sizeof(struct ether_addr);
-       if (input->flow_ext.vlan_tci) {
-               DP_INFO(edev, "adding VLAN header\n");
-               rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
-               rte_memcpy(raw_pkt + sizeof(uint16_t),
-                          &input->flow_ext.vlan_tci,
-                          sizeof(uint16_t));
-               raw_pkt += sizeof(vlan_frame);
-               len += sizeof(vlan_frame);
-       }
+       len = 2 * sizeof(struct rte_ether_addr);
+       raw_pkt += 2 * sizeof(struct rte_ether_addr);
        ether_type = (uint16_t *)raw_pkt;
        raw_pkt += sizeof(uint16_t);
        len += sizeof(uint16_t);
 
-       switch (input->flow_type) {
-       case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-       case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-               /* fill the common ip header */
+       *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
+       switch (arfs->tuple.eth_proto) {
+       case ETHER_TYPE_IPv4:
                ip = (struct ipv4_hdr *)raw_pkt;
-               *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
                ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
                ip->total_length = sizeof(struct ipv4_hdr);
-               ip->next_proto_id = input->flow.ip4_flow.proto ?
-                                   input->flow.ip4_flow.proto :
-                                   next_proto[input->flow_type];
-               ip->time_to_live = input->flow.ip4_flow.ttl ?
-                                  input->flow.ip4_flow.ttl :
-                                  QEDE_FDIR_IPV4_DEF_TTL;
-               ip->type_of_service = input->flow.ip4_flow.tos;
-               ip->dst_addr = input->flow.ip4_flow.dst_ip;
-               ip->src_addr = input->flow.ip4_flow.src_ip;
+               ip->next_proto_id = arfs->tuple.ip_proto;
+               ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
+               ip->dst_addr = arfs->tuple.dst_ipv4;
+               ip->src_addr = arfs->tuple.src_ipv4;
                len += sizeof(struct ipv4_hdr);
                params->ipv4 = true;
 
                raw_pkt = (uint8_t *)buff;
                /* UDP */
-               if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+               if (arfs->tuple.ip_proto == IPPROTO_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
-                       udp->dst_port = input->flow.udp4_flow.dst_port;
-                       udp->src_port = input->flow.udp4_flow.src_port;
+                       udp->dst_port = arfs->tuple.dst_port;
+                       udp->src_port = arfs->tuple.src_port;
                        udp->dgram_len = sizeof(struct udp_hdr);
                        len += sizeof(struct udp_hdr);
                        /* adjust ip total_length */
@@ -429,8 +497,8 @@ qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
-                       tcp->src_port = input->flow.tcp4_flow.src_port;
-                       tcp->dst_port = input->flow.tcp4_flow.dst_port;
+                       tcp->src_port = arfs->tuple.src_port;
+                       tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        /* adjust ip total_length */
@@ -438,43 +506,39 @@ qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                        params->tcp = true;
                }
                break;
-       case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-       case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+       case ETHER_TYPE_IPv6:
                ip6 = (struct ipv6_hdr *)raw_pkt;
-               *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
-               ip6->proto = input->flow.ipv6_flow.proto ?
-                                       input->flow.ipv6_flow.proto :
-                                       next_proto[input->flow_type];
+               ip6->proto = arfs->tuple.ip_proto;
                ip6->vtc_flow =
                        rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
 
-               rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
+               rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
                           IPV6_ADDR_LEN);
-               rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
+               rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
                           IPV6_ADDR_LEN);
                len += sizeof(struct ipv6_hdr);
                params->ipv6 = true;
 
                raw_pkt = (uint8_t *)buff;
                /* UDP */
-               if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+               if (arfs->tuple.ip_proto == IPPROTO_UDP) {
                        udp = (struct udp_hdr *)(raw_pkt + len);
-                       udp->src_port = input->flow.udp6_flow.src_port;
-                       udp->dst_port = input->flow.udp6_flow.dst_port;
+                       udp->src_port = arfs->tuple.src_port;
+                       udp->dst_port = arfs->tuple.dst_port;
                        len += sizeof(struct udp_hdr);
                        params->udp = true;
                } else { /* TCP */
                        tcp = (struct tcp_hdr *)(raw_pkt + len);
-                       tcp->src_port = input->flow.tcp6_flow.src_port;
-                       tcp->dst_port = input->flow.tcp6_flow.dst_port;
+                       tcp->src_port = arfs->tuple.src_port;
+                       tcp->dst_port = arfs->tuple.dst_port;
                        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
                        len += sizeof(struct tcp_hdr);
                        params->tcp = true;
                }
                break;
        default:
-               DP_ERR(edev, "Unsupported flow_type %u\n",
-                      input->flow_type);
+               DP_ERR(edev, "Unsupported eth_proto %u\n",
+                      arfs->tuple.eth_proto);
                return 0;
        }
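For orientation, the template packet assembled above has this layout in the IPv4 case; the offsets follow from the sizeof() arithmetic in the code (the MAC addresses stay zeroed from the earlier memset):

```c
/*
 * Template packet layout, IPv4/TCP case (sketch):
 *
 *   offset:  0             12          14                 34
 *            +-------------+-----------+------------------+---------+
 *            | dst/src MAC | ethertype | struct ipv4_hdr  | tcp_hdr |
 *            +-------------+-----------+------------------+---------+
 *
 * Only the ethertype, IP addresses, IP protocol and L4 ports carry
 * match data; every other field is left at a default for the ARFS
 * searcher.
 */
```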
 
@@ -1096,6 +1160,327 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
+static int
+qede_flow_validate_attr(__attribute__((unused)) struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       struct rte_flow_error *error)
+{
+       if (attr == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+                                  "NULL attribute");
+               return -rte_errno;
+       }
+
+       if (attr->group != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+                                  "Groups are not supported");
+               return -rte_errno;
+       }
+
+       if (attr->priority != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+                                  "Priorities are not supported");
+               return -rte_errno;
+       }
+
+       if (attr->egress != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+                                  "Egress is not supported");
+               return -rte_errno;
+       }
+
+       if (attr->transfer != 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+                                  "Transfer is not supported");
+               return -rte_errno;
+       }
+
+       if (attr->ingress == 0) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+                                  "Only ingress is supported");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
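Taken together, these checks admit exactly one attribute combination; a sketch of an attribute block that passes:

```c
#include <rte_flow.h>

/* The only rte_flow_attr shape qede_flow_validate_attr() accepts:
 * ingress only, group 0, priority 0, no egress or transfer. */
static const struct rte_flow_attr qede_attr_example = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
	.egress = 0,
	.transfer = 0,
};
```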
+static int
+qede_flow_parse_pattern(__attribute__((unused)) struct rte_eth_dev *dev,
+                       const struct rte_flow_item pattern[],
+                       struct rte_flow_error *error,
+                       struct rte_flow *flow)
+{
+       bool l3 = false, l4 = false;
+
+       if (pattern == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+                                  "NULL pattern");
+               return -rte_errno;
+       }
+
+       for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+               if (!pattern->spec) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item spec not defined");
+                       return -rte_errno;
+               }
+
+               if (pattern->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item last not supported");
+                       return -rte_errno;
+               }
+
+               if (pattern->mask) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Item mask not supported");
+                       return -rte_errno;
+               }
+
+               /* Below validation is only for 4 tuple flow
+                * (GFT_PROFILE_TYPE_4_TUPLE)
+                * - src and dst L3 address (IPv4 or IPv6)
+                * - src and dst L4 port (TCP or UDP)
+                */
+
+               switch (pattern->type) {
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       l3 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_ipv4 *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+                               flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+                               flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       l3 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_ipv6 *spec;
+
+                               spec = pattern->spec;
+                               rte_memcpy(flow->entry.tuple.src_ipv6,
+                                          spec->hdr.src_addr,
+                                          IPV6_ADDR_LEN);
+                               rte_memcpy(flow->entry.tuple.dst_ipv6,
+                                          spec->hdr.dst_addr,
+                                          IPV6_ADDR_LEN);
+                               flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       l4 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_udp *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_port =
+                                               spec->hdr.src_port;
+                               flow->entry.tuple.dst_port =
+                                               spec->hdr.dst_port;
+                               flow->entry.tuple.ip_proto = IPPROTO_UDP;
+                       }
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       l4 = true;
+
+                       if (flow) {
+                               const struct rte_flow_item_tcp *spec;
+
+                               spec = pattern->spec;
+                               flow->entry.tuple.src_port =
+                                               spec->hdr.src_port;
+                               flow->entry.tuple.dst_port =
+                                               spec->hdr.dst_port;
+                               flow->entry.tuple.ip_proto = IPPROTO_TCP;
+                       }
+
+                       break;
+               default:
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          pattern,
+                                          "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+                       return -rte_errno;
+               }
+       }
+
+       if (!(l3 && l4)) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  pattern,
+                                  "Item types need to have both L3 and L4 protocols");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
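A pattern this parser accepts must use spec-only items (no mask or last) and cover both L3 and L4; an illustrative IPv4 + UDP example (the values are ours, not from the patch):

```c
#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow_item_ipv4 ipv4_spec = {
	.hdr.src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
	.hdr.dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
};

static struct rte_flow_item_udp udp_spec = {
	.hdr.src_port = RTE_BE16(1024),
	.hdr.dst_port = RTE_BE16(4789),
};

static const struct rte_flow_item pattern_example[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
```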
+static int
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error,
+                       struct rte_flow *flow)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+       const struct rte_flow_action_queue *queue;
+
+       if (actions == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+                                  "NULL actions");
+               return -rte_errno;
+       }
+
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       queue = actions->conf;
+
+                       if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ACTION,
+                                                  actions,
+                                                  "Bad QUEUE action");
+                               return -rte_errno;
+                       }
+
+                       if (flow)
+                               flow->entry.rx_queue = queue->index;
+
+                       break;
+
+               default:
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ACTION,
+                                          actions,
+                                          "Action is not supported - only ACTION_TYPE_QUEUE supported");
+                       return -rte_errno;
+               }
+       }
+
+       return 0;
+}
+
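The action list must be a single QUEUE action terminated by END; anything else is rejected. Sketch:

```c
#include <rte_flow.h>

/* Steer matches to Rx queue 2; the index must be below the port's
 * configured Rx queue count (the QEDE_RSS_COUNT() check above). */
static const struct rte_flow_action_queue queue_example = { .index = 2 };

static const struct rte_flow_action actions_example[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_example },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
```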
+static int
+qede_flow_parse(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item patterns[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error,
+               struct rte_flow *flow)
+{
+       int rc = 0;
+
+       rc = qede_flow_validate_attr(dev, attr, error);
+       if (rc)
+               return rc;
+
+       /* Parse and validate the item pattern and actions.
+        * The given item list and actions will be translated into the
+        * qede PMD-specific arfs structure.
+        */
+       rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+       if (rc)
+               return rc;
+
+       rc = qede_flow_parse_actions(dev, actions, error, flow);
+
+       return rc;
+}
+
+static int
+qede_flow_validate(struct rte_eth_dev *dev,
+                  const struct rte_flow_attr *attr,
+                  const struct rte_flow_item patterns[],
+                  const struct rte_flow_action actions[],
+                  struct rte_flow_error *error)
+{
+       return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
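Passing flow == NULL makes qede_flow_parse() a pure dry run, which is exactly what the generic validate call needs. A hypothetical application-side check, reusing the attr/pattern/actions sketches above:

```c
#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical dry run; dispatches to qede_flow_validate() through
 * qede_flow_ops on a qede port. */
static int
rule_would_be_accepted(uint16_t port_id)
{
	struct rte_flow_error err = { 0 };
	int rc;

	rc = rte_flow_validate(port_id, &qede_attr_example,
			       pattern_example, actions_example, &err);
	if (rc)
		printf("rule rejected: %s\n",
		       err.message ? err.message : "no details");
	return rc == 0;
}
```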
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+                const struct rte_flow_attr *attr,
+                const struct rte_flow_item pattern[],
+                const struct rte_flow_action actions[],
+                struct rte_flow_error *error)
+{
+       struct rte_flow *flow = NULL;
+       int rc;
+
+       flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+       if (flow == NULL) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "Failed to allocate memory");
+               return NULL;
+       }
+
+       rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+       if (rc < 0) {
+               rte_free(flow);
+               return NULL;
+       }
+
+       rc = qede_config_arfs_filter(dev, &flow->entry, true);
+       if (rc < 0) {
+               /* rte_flow_error_set() expects a positive errno value */
+               rte_flow_error_set(error, -rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to configure flow filter");
+               rte_free(flow);
+               return NULL;
+       }
+
+       return flow;
+}
+
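A matching usage sketch for the create path (hypothetical helper, reusing the earlier example definitions):

```c
#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical install/remove cycle driving qede_flow_create() and
 * qede_flow_destroy() through the generic API. */
static void
install_and_remove_rule(uint16_t port_id)
{
	struct rte_flow_error err = { 0 };
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &qede_attr_example, pattern_example,
			       actions_example, &err);
	if (flow == NULL) {
		printf("flow create failed: %s\n",
		       err.message ? err.message : "no details");
		return;
	}

	/* ... matching traffic now lands on the QUEUE action's queue ... */

	rte_flow_destroy(port_id, flow, &err);
}
```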
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+                 struct rte_flow *flow,
+                 struct rte_flow_error *error)
+{
+       int rc = 0;
+
+       /* On a successful delete qede_config_arfs_filter() unlinks and
+        * frees the listed arfs entry, i.e. this flow's storage, so the
+        * handle must only be freed here when the delete failed.
+        */
+       rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+       if (rc < 0) {
+               rte_flow_error_set(error, -rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to delete flow filter");
+               rte_free(flow);
+       }
+
+       return rc;
+}
+
+const struct rte_flow_ops qede_flow_ops = {
+       .validate = qede_flow_validate,
+       .create = qede_flow_create,
+       .destroy = qede_flow_destroy,
+};
+
 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                         enum rte_filter_type filter_type,
                         enum rte_filter_op filter_op,
@@ -1132,6 +1517,17 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                return qede_fdir_filter_conf(eth_dev, filter_op, arg);
        case RTE_ETH_FILTER_NTUPLE:
                return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+       case RTE_ETH_FILTER_GENERIC:
+               if (ECORE_IS_CMT(edev)) {
+                       DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+                       return -ENOTSUP;
+               }
+
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+
+               *(const void **)arg = &qede_flow_ops;
+               return 0;
        case RTE_ETH_FILTER_MACVLAN:
        case RTE_ETH_FILTER_ETHERTYPE:
        case RTE_ETH_FILTER_FLEXIBLE:
@@ -1147,5 +1543,3 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
 
        return 0;
 }
-
-/* RTE_FLOW */
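Closing note: the RTE_ETH_FILTER_GENERIC case added to qede_dev_filter_ctrl() is how the rte_flow layer discovers qede_flow_ops. Internally the library performs the equivalent of this lookup (sketch; the wrapper name is ours):

```c
#include <rte_ethdev.h>
#include <rte_flow_driver.h>

/* Hypothetical wrapper showing how the flow ops table published above
 * is fetched through the legacy filter-ctrl hook. */
static const struct rte_flow_ops *
qede_get_flow_ops(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}
```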