net/dpaa2: support generic flow
author Sunil Kumar Kori <sunil.kori@nxp.com>
Fri, 22 Feb 2019 11:16:05 +0000 (11:16 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 1 Mar 2019 17:17:35 +0000 (18:17 +0100)
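
Add initial support for the generic flow (rte_flow) API to the DPAA2 PMD
through the filter_ctrl ethdev op. ETH, VLAN, IPv4, IPv6, ICMP, UDP, TCP,
SCTP and GRE pattern items are recognized, with QUEUE and RSS actions; the
flow attribute's group selects the FS traffic class and its priority selects
the rule index within that class. A usage sketch, assuming the standard
testpmd flow syntax (illustrative only, not part of this patch):

    testpmd> flow create 0 group 0 priority 0 ingress pattern eth / ipv4 src is 10.0.0.1 / end actions queue index 1 / end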
Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
drivers/net/dpaa2/Makefile
drivers/net/dpaa2/base/dpaa2_hw_dpni.c
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.h
drivers/net/dpaa2/dpaa2_flow.c [new file with mode: 0644]
drivers/net/dpaa2/mc/dpni.c
drivers/net/dpaa2/mc/fsl_dpni.h
drivers/net/dpaa2/mc/fsl_dpni_cmd.h
drivers/net/dpaa2/meson.build

index 5625511..8bd269b 100644 (file)
@@ -33,6 +33,7 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_mux.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c
index 11f1493..56e2e56 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "../dpaa2_ethdev.h"
 
-static int
+int
 dpaa2_distset_to_dpkg_profile_cfg(
                uint64_t req_dist_set,
                struct dpkg_profile_cfg *kg_cfg);
@@ -170,7 +170,7 @@ int dpaa2_remove_flow_dist(
        return ret;
 }
 
-static int
+int
 dpaa2_distset_to_dpkg_profile_cfg(
                uint64_t req_dist_set,
                struct dpkg_profile_cfg *kg_cfg)
index 08a95a1..a8f0e30 100644 (file)
@@ -17,6 +17,7 @@
 #include <rte_kvargs.h>
 #include <rte_dev.h>
 #include <rte_fslmc.h>
+#include <rte_flow_driver.h>
 
 #include "dpaa2_pmd_logs.h"
 #include <fslmc_vfio.h>
@@ -83,6 +84,14 @@ static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
        {"egress_confirmed_frames", 2, 4},
 };
 
+static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
+       RTE_ETH_FILTER_ADD,
+       RTE_ETH_FILTER_DELETE,
+       RTE_ETH_FILTER_UPDATE,
+       RTE_ETH_FILTER_FLUSH,
+       RTE_ETH_FILTER_GET
+};
+
 static struct rte_dpaa2_driver rte_dpaa2_pmd;
 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
@@ -1892,6 +1901,47 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
        return ret;
 }
 
+static inline int
+dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
+{
+       unsigned int i;
+
+       for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
+               if (dpaa2_supported_filter_ops[i] == filter_op)
+                       return 0;
+       }
+       return -ENOTSUP;
+}
+
+static int
+dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
+                   enum rte_filter_type filter_type,
+                   enum rte_filter_op filter_op,
+                   void *arg)
+{
+       int ret = 0;
+
+       if (!dev)
+               return -ENODEV;
+
+       switch (filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
+               if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
+                       ret = -ENOTSUP;
+                       break;
+               }
+               *(const void **)arg = &dpaa2_flow_ops;
+               dpaa2_filter_type |= filter_type;
+               break;
+       default:
+               RTE_LOG(ERR, PMD, "Filter type (%d) not supported\n",
+                       filter_type);
+               ret = -ENOTSUP;
+               break;
+       }
+       return ret;
+}
+
 static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure    = dpaa2_eth_dev_configure,
        .dev_start            = dpaa2_dev_start,
@@ -1930,6 +1980,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
        .mac_addr_set         = dpaa2_dev_set_mac_addr,
        .rss_hash_update      = dpaa2_dev_rss_hash_update,
        .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
+       .filter_ctrl          = dpaa2_dev_flow_ctrl,
 };
 
 /* Populate the mac address from physically available (u-boot/firmware) and/or
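
The filter_ctrl hook registered above is how the rte_flow layer reaches the
driver: rte_flow_* calls fetch the driver's rte_flow_ops through a
RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET request. A minimal sketch of that
lookup (illustrative, not part of this patch; port_id is assumed to be a
valid, started port):

    const struct rte_flow_ops *ops = NULL;
    /* rte_flow_validate()/rte_flow_create() perform this lookup internally;
     * here it lands in dpaa2_dev_flow_ctrl() and returns &dpaa2_flow_ops.
     */
    int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
                                      RTE_ETH_FILTER_GET, &ops);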
@@ -2046,7 +2097,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        struct dpni_attr attr;
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct dpni_buffer_layout layout;
-       int ret, hw_id;
+       int ret, hw_id, i;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2102,11 +2153,8 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
        priv->num_rx_tc = attr.num_rx_tcs;
 
-       /* Resetting the "num_rx_queues" to equal number of queues in first TC
-        * as only one TC is supported on Rx Side. Once Multiple TCs will be
-        * in use for Rx processing then this will be changed or removed.
-        */
-       priv->nb_rx_queues = attr.num_queues;
+       for (i = 0; i < attr.num_rx_tcs; i++)
+               priv->nb_rx_queues += attr.num_queues;
 
        /* Using number of TX queues as number of TX TCs */
        priv->nb_tx_queues = attr.num_tx_tcs;
@@ -2184,6 +2232,26 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
+       /* Init fields w.r.t. classification */
+       memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
+       priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
+       if (!priv->extract.qos_extract_param) {
+               DPAA2_PMD_ERR("Error(%d) in allocating resources for flow "
+                           "classification", ret);
+               goto init_err;
+       }
+       for (i = 0; i < MAX_TCS; i++) {
+               memset(&priv->extract.fs_key_cfg[i], 0,
+                       sizeof(struct dpkg_profile_cfg));
+               priv->extract.fs_extract_param[i] =
+                       (size_t)rte_malloc(NULL, 256, 64);
+               if (!priv->extract.fs_extract_param[i]) {
+                       DPAA2_PMD_ERR("Error(%d) in allocating resources for flow classification",
+                                    ret);
+                       goto init_err;
+               }
+       }
+
        RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
        return 0;
 init_err:
@@ -2196,7 +2264,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
 {
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-       int ret;
+       int i, ret;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2224,6 +2292,14 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
        priv->hw = NULL;
        rte_free(dpni);
 
+       for (i = 0; i < MAX_TCS; i++) {
+               if (priv->extract.fs_extract_param[i])
+                       rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
+       }
+
+       if (priv->extract.qos_extract_param)
+               rte_free((void *)(size_t)priv->extract.qos_extract_param);
+
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
index 7148104..0ef1bf3 100644 (file)
 /* enable timestamp in mbuf*/
 extern enum pmd_dpaa2_ts dpaa2_enable_ts;
 
+#define DPAA2_QOS_TABLE_RECONFIGURE    1
+#define DPAA2_FS_TABLE_RECONFIGURE     2
+
+/* Externally defined */
+extern const struct rte_flow_ops dpaa2_flow_ops;
+extern enum rte_filter_type dpaa2_filter_type;
+
 struct dpaa2_dev_priv {
        void *hw;
        int32_t hw_id;
@@ -107,8 +114,23 @@ struct dpaa2_dev_priv {
        uint8_t flags; /*dpaa2 config flags */
        uint8_t en_ordered;
        uint8_t en_loose_ordered;
+
+       struct pattern_s {
+               uint8_t item_count;
+               uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
+       } pattern[MAX_TCS + 1];
+
+       struct extract_s {
+               struct dpkg_profile_cfg qos_key_cfg;
+               struct dpkg_profile_cfg fs_key_cfg[MAX_TCS];
+               uint64_t qos_extract_param;
+               uint64_t fs_extract_param[MAX_TCS];
+       } extract;
 };
 
+int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
+                                     struct dpkg_profile_cfg *kg_cfg);
+
 int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
                          uint64_t req_dist_set);
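
The qos_key_cfg/fs_key_cfg profiles and their 256-byte extract_param buffers
added to dpaa2_dev_priv work in pairs: a dpkg_profile_cfg describes which
header fields to extract, and the buffer holds its serialized form, whose IOVA
is handed to the MC when the QoS/FS tables are (re)configured. A minimal
sketch of that serialization step, assuming the dpkg_prepare_key_cfg() helper
built from mc/dpkg.c (the wrapper function name is hypothetical):

    static int
    dpaa2_prepare_fs_key(struct dpaa2_dev_priv *priv, uint8_t tc_id)
    {
            /* Serialize the per-TC profile into its pre-allocated buffer */
            return dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[tc_id],
                    (uint8_t *)(size_t)priv->extract.fs_extract_param[tc_id]);
    }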
 
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
new file mode 100644 (file)
index 0000000..20de3da
--- /dev/null
@@ -0,0 +1,1972 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright 2018 NXP
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_eth_ctrl.h>
+#include <rte_malloc.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include <fsl_dpni.h>
+#include <fsl_dpkg.h>
+
+#include <dpaa2_ethdev.h>
+#include <dpaa2_pmd_logs.h>
+
+struct rte_flow {
+       struct dpni_rule_cfg rule;
+       uint8_t key_size;
+       uint8_t tc_id;
+       uint8_t flow_type;
+       uint8_t index;
+       enum rte_flow_action_type action;
+       uint16_t flow_id;
+};
+
+/* Layout for rule compositions for supported patterns */
+/* TODO: Current design only supports Ethernet + IPv4 based classification. */
+/* Only the corresponding offset macros are valid; the rest are placeholders */
+/* for now. Once support for other network headers is added, the */
+/* corresponding macros will be updated with correct values. */
+#define DPAA2_CLS_RULE_OFFSET_ETH      0       /*Start of buffer*/
+#define DPAA2_CLS_RULE_OFFSET_VLAN     14      /* DPAA2_CLS_RULE_OFFSET_ETH */
+                                               /*      + Sizeof Eth fields  */
+#define DPAA2_CLS_RULE_OFFSET_IPV4     14      /* DPAA2_CLS_RULE_OFFSET_VLAN */
+                                               /*      + Sizeof VLAN fields */
+#define DPAA2_CLS_RULE_OFFSET_IPV6     25      /* DPAA2_CLS_RULE_OFFSET_IPV4 */
+                                               /*      + Sizeof IPV4 fields */
+#define DPAA2_CLS_RULE_OFFSET_ICMP     58      /* DPAA2_CLS_RULE_OFFSET_IPV6 */
+                                               /*      + Sizeof IPV6 fields */
+#define DPAA2_CLS_RULE_OFFSET_UDP      60      /* DPAA2_CLS_RULE_OFFSET_ICMP */
+                                               /*      + Sizeof ICMP fields */
+#define DPAA2_CLS_RULE_OFFSET_TCP      64      /* DPAA2_CLS_RULE_OFFSET_UDP  */
+                                               /*      + Sizeof UDP fields  */
+#define DPAA2_CLS_RULE_OFFSET_SCTP     68      /* DPAA2_CLS_RULE_OFFSET_TCP  */
+                                               /*      + Sizeof TCP fields  */
+#define DPAA2_CLS_RULE_OFFSET_GRE      72      /* DPAA2_CLS_RULE_OFFSET_SCTP */
+                                               /*      + Sizeof SCTP fields */
+
+static const
+enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
+       RTE_FLOW_ITEM_TYPE_END,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_VLAN,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_ICMP,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_TCP,
+       RTE_FLOW_ITEM_TYPE_SCTP,
+       RTE_FLOW_ITEM_TYPE_GRE,
+};
+
+static const
+enum rte_flow_action_type dpaa2_supported_action_type[] = {
+       RTE_FLOW_ACTION_TYPE_END,
+       RTE_FLOW_ACTION_TYPE_QUEUE,
+       RTE_FLOW_ACTION_TYPE_RSS
+};
+
+enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
+static const void *default_mask;
+
+static int
+dpaa2_configure_flow_eth(struct rte_flow *flow,
+                        struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item *pattern,
+                        const struct rte_flow_action actions[] __rte_unused,
+                        struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_eth *spec, *mask;
+
+       /* TODO: Currently upper bound of range parameter is not implemented */
+       const struct rte_flow_item_eth *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       /* TODO: pattern[] has 9 elements: index 8 tracks the QoS table and */
+       /* indexes 0-7 track the FS tables. Index 8 could be replaced by a */
+       /* macro. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_eth *)pattern->spec;
+       last    = (const struct rte_flow_item_eth *)pattern->last;
+       mask    = (const struct rte_flow_item_eth *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       /* Key rule */
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
+       memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
+                                               sizeof(struct ether_addr));
+       key_iova += sizeof(struct ether_addr);
+       memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
+                                               sizeof(struct ether_addr));
+       key_iova += sizeof(struct ether_addr);
+       memcpy((void *)key_iova, (const void *)(&spec->type),
+                                               sizeof(rte_be16_t));
+
+       /* Key mask */
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
+       memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
+                                               sizeof(struct ether_addr));
+       mask_iova += sizeof(struct ether_addr);
+       memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
+                                               sizeof(struct ether_addr));
+       mask_iova += sizeof(struct ether_addr);
+       memcpy((void *)mask_iova, (const void *)(&mask->type),
+                                               sizeof(rte_be16_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
+                               ((2  * sizeof(struct ether_addr)) +
+                               sizeof(rte_be16_t)));
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_vlan(struct rte_flow *flow,
+                         struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         const struct rte_flow_item *pattern,
+                         const struct rte_flow_action actions[] __rte_unused,
+                         struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_vlan *spec, *mask;
+
+       const struct rte_flow_item_vlan *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+               priv->extract.qos_key_cfg.num_extracts++;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+               priv->extract.fs_key_cfg[group].num_extracts++;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_vlan *)pattern->spec;
+       last    = (const struct rte_flow_item_vlan *)pattern->last;
+       mask    = (const struct rte_flow_item_vlan *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
+       memcpy((void *)key_iova, (const void *)(&spec->tci),
+                                                       sizeof(rte_be16_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
+       memcpy((void *)mask_iova, (const void *)(&mask->tci),
+                                                       sizeof(rte_be16_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_ipv4(struct rte_flow *flow,
+                         struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         const struct rte_flow_item *pattern,
+                         const struct rte_flow_action actions[] __rte_unused,
+                         struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_ipv4 *spec, *mask;
+
+       const struct rte_flow_item_ipv4 *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_ipv4 *)pattern->spec;
+       last    = (const struct rte_flow_item_ipv4 *)pattern->last;
+       mask    = (const struct rte_flow_item_ipv4 *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
+       memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
+                                                       sizeof(uint32_t));
+       key_iova += sizeof(uint32_t);
+       memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
+                                                       sizeof(uint32_t));
+       key_iova += sizeof(uint32_t);
+       memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
+                                                       sizeof(uint8_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
+       memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
+                                                       sizeof(uint32_t));
+       mask_iova += sizeof(uint32_t);
+       memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
+                                                       sizeof(uint32_t));
+       mask_iova += sizeof(uint32_t);
+       memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
+                                                       sizeof(uint8_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
+                               (2 * sizeof(uint32_t)) + sizeof(uint8_t));
+
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_ipv6(struct rte_flow *flow,
+                         struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         const struct rte_flow_item *pattern,
+                         const struct rte_flow_action actions[] __rte_unused,
+                         struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_ipv6 *spec, *mask;
+
+       const struct rte_flow_item_ipv6 *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_ipv6 *)pattern->spec;
+       last    = (const struct rte_flow_item_ipv6 *)pattern->last;
+       mask    = (const struct rte_flow_item_ipv6 *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
+       memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
+                                               sizeof(spec->hdr.src_addr));
+       key_iova += sizeof(spec->hdr.src_addr);
+       memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
+                                               sizeof(spec->hdr.dst_addr));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
+       memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
+                                               sizeof(mask->hdr.src_addr));
+       mask_iova += sizeof(mask->hdr.src_addr);
+       memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
+                                               sizeof(mask->hdr.dst_addr));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
+                                       sizeof(spec->hdr.src_addr) +
+                                       sizeof(spec->hdr.dst_addr));
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_icmp(struct rte_flow *flow,
+                         struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         const struct rte_flow_item *pattern,
+                         const struct rte_flow_action actions[] __rte_unused,
+                         struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_icmp *spec, *mask;
+
+       const struct rte_flow_item_icmp *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_icmp *)pattern->spec;
+       last    = (const struct rte_flow_item_icmp *)pattern->last;
+       mask    = (const struct rte_flow_item_icmp *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
+       memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
+                                                       sizeof(uint8_t));
+       key_iova += sizeof(uint8_t);
+       memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
+                                                       sizeof(uint8_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
+       memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
+                                                       sizeof(uint8_t));
+       mask_iova += sizeof(uint8_t);
+       memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
+                                                       sizeof(uint8_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
+                               (2 * sizeof(uint8_t)));
+
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_udp(struct rte_flow *flow,
+                        struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         const struct rte_flow_item *pattern,
+                         const struct rte_flow_action actions[] __rte_unused,
+                         struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_udp *spec, *mask;
+
+       const struct rte_flow_item_udp *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_udp *)pattern->spec;
+       last    = (const struct rte_flow_item_udp *)pattern->last;
+       mask    = (const struct rte_flow_item_udp *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
+                                       (2 * sizeof(uint32_t));
+       memset((void *)key_iova, 0x11, sizeof(uint8_t));
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
+       memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
+                                                       sizeof(uint16_t));
+       key_iova +=  sizeof(uint16_t);
+       memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
+                                                       sizeof(uint16_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
+       memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
+                                                       sizeof(uint16_t));
+       mask_iova +=  sizeof(uint16_t);
+       memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
+                                                       sizeof(uint16_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
+                               (2 * sizeof(uint16_t)));
+
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_tcp(struct rte_flow *flow,
+                        struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item *pattern,
+                        const struct rte_flow_action actions[] __rte_unused,
+                        struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_tcp *spec, *mask;
+
+       const struct rte_flow_item_tcp *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* The DPAA2 platform limits the number of extract parameters to */
+       /* DPKG_MAX_NUM_OF_EXTRACTS, so verify that limit as well. */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] != pattern->type) {
+                       continue;
+               } else {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_tcp *)pattern->spec;
+       last    = (const struct rte_flow_item_tcp *)pattern->last;
+       mask    = (const struct rte_flow_item_tcp *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
+                                       (2 * sizeof(uint32_t));
+       /* IP protocol field of the key: 0x06 = TCP */
+       memset((void *)key_iova, 0x06, sizeof(uint8_t));
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
+       memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
+                                                       sizeof(uint16_t));
+       key_iova += sizeof(uint16_t);
+       memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
+                                                       sizeof(uint16_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
+       memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
+                                                       sizeof(uint16_t));
+       mask_iova += sizeof(uint16_t);
+       memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
+                                                       sizeof(uint16_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
+                               (2 * sizeof(uint16_t)));
+
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_sctp(struct rte_flow *flow,
+                         struct rte_eth_dev *dev,
+                         const struct rte_flow_attr *attr,
+                         const struct rte_flow_item *pattern,
+                         const struct rte_flow_action actions[] __rte_unused,
+                         struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_sctp *spec, *mask;
+
+       const struct rte_flow_item_sctp *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* DPAA2 platform has a limitation that extract parameter cannot be
+        * more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.
+        */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] == pattern->type) {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] == pattern->type) {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
+               index++;
+
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
+               index++;
+
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_sctp *)pattern->spec;
+       last    = (const struct rte_flow_item_sctp *)pattern->last;
+       mask    = (const struct rte_flow_item_sctp *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
+                                               (2 * sizeof(uint32_t));
+       /* IP protocol field of the key: 0x84 (132) = SCTP */
+       memset((void *)key_iova, 0x84, sizeof(uint8_t));
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
+       memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
+                                                       sizeof(uint16_t));
+       key_iova += sizeof(uint16_t);
+       memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
+                                                       sizeof(uint16_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
+       memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
+                                                       sizeof(uint16_t));
+       mask_iova += sizeof(uint16_t);
+       memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
+                                                       sizeof(uint16_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
+                               (2 * sizeof(uint16_t)));
+       return device_configured;
+}
+
+static int
+dpaa2_configure_flow_gre(struct rte_flow *flow,
+                        struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item *pattern,
+                        const struct rte_flow_action actions[] __rte_unused,
+                        struct rte_flow_error *error __rte_unused)
+{
+       int index, j = 0;
+       size_t key_iova;
+       size_t mask_iova;
+       int device_configured = 0, entry_found = 0;
+       uint32_t group;
+       const struct rte_flow_item_gre *spec, *mask;
+
+       const struct rte_flow_item_gre *last __rte_unused;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+       group = attr->group;
+
+       /* DPAA2 platform has a limitation that extract parameter cannot be
+        * more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.
+        */
+       if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
+               DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
+                                               DPKG_MAX_NUM_OF_EXTRACTS);
+               return -ENOTSUP;
+       }
+
+       for (j = 0; j < priv->pattern[8].item_count; j++) {
+               if (priv->pattern[8].pattern_type[j] == pattern->type) {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[8].pattern_type[j] = pattern->type;
+               priv->pattern[8].item_count++;
+               device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       entry_found = 0;
+       for (j = 0; j < priv->pattern[group].item_count; j++) {
+               if (priv->pattern[group].pattern_type[j] == pattern->type) {
+                       entry_found = 1;
+                       break;
+               }
+       }
+
+       if (!entry_found) {
+               priv->pattern[group].pattern_type[j] = pattern->type;
+               priv->pattern[group].item_count++;
+               device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       flow->tc_id = group;
+       flow->index = attr->priority;
+
+       if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+               index = priv->extract.qos_key_cfg.num_extracts;
+               priv->extract.qos_key_cfg.extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
+               priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
+               index++;
+
+               priv->extract.qos_key_cfg.num_extracts = index;
+       }
+
+       if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+               index = priv->extract.fs_key_cfg[group].num_extracts;
+               priv->extract.fs_key_cfg[group].extracts[index].type =
+                                                       DPKG_EXTRACT_FROM_HDR;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
+               priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
+               index++;
+
+               priv->extract.fs_key_cfg[group].num_extracts = index;
+       }
+
+       /* Parse pattern list to get the matching parameters */
+       spec    = (const struct rte_flow_item_gre *)pattern->spec;
+       last    = (const struct rte_flow_item_gre *)pattern->last;
+       mask    = (const struct rte_flow_item_gre *)
+                       (pattern->mask ? pattern->mask : default_mask);
+
+       key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
+       memcpy((void *)key_iova, (const void *)(&spec->protocol),
+                                                       sizeof(rte_be16_t));
+
+       mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
+       memcpy((void *)mask_iova, (const void *)(&mask->protocol),
+                                                       sizeof(rte_be16_t));
+
+       flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));
+
+       return device_configured;
+}
+
+static int
+dpaa2_generic_flow_set(struct rte_flow *flow,
+                      struct rte_eth_dev *dev,
+                      const struct rte_flow_attr *attr,
+                      const struct rte_flow_item pattern[],
+                      const struct rte_flow_action actions[],
+                      struct rte_flow_error *error)
+{
+       const struct rte_flow_action_queue *dest_queue;
+       const struct rte_flow_action_rss *rss_conf;
+       uint16_t index;
+       int is_keycfg_configured = 0, end_of_list = 0;
+       int ret = 0, i = 0, j = 0;
+       struct dpni_attr nic_attr;
+       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpni_qos_tbl_cfg qos_cfg;
+       struct dpkg_profile_cfg key_cfg;
+       struct dpni_fs_action_cfg action;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+       size_t param;
+
+       /* Parse pattern list to get the matching parameters */
+       while (!end_of_list) {
+               switch (pattern[i].type) {
+               case RTE_FLOW_ITEM_TYPE_ETH:
+                       is_keycfg_configured = dpaa2_configure_flow_eth(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VLAN:
+                       is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_ICMP:
+                       is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       is_keycfg_configured = dpaa2_configure_flow_udp(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_SCTP:
+                       is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
+                                                                       dev, attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_GRE:
+                       is_keycfg_configured = dpaa2_configure_flow_gre(flow,
+                                                                       dev,
+                                                                       attr,
+                                                                       &pattern[i],
+                                                                       actions,
+                                                                       error);
+                       break;
+               case RTE_FLOW_ITEM_TYPE_END:
+                       end_of_list = 1;
+                       break; /*End of List*/
+               default:
+                       DPAA2_PMD_ERR("Invalid pattern type");
+                       ret = -ENOTSUP;
+                       break;
+               }
+               i++;
+       }
+
+       /* Let's parse action on matching traffic */
+       end_of_list = 0;
+       while (!end_of_list) {
+               switch (actions[j].type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
+                       flow->flow_id = dest_queue->index;
+                       flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
+                       memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
+                       action.flow_id = flow->flow_id;
+                       if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+                               if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
+                                                        (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
+                                       DPAA2_PMD_ERR(
+                                       "Unable to prepare extract parameters");
+                                       return -1;
+                               }
+
+                               memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
+                               qos_cfg.discard_on_miss = true;
+                               qos_cfg.keep_entries = true;
+                               qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
+                               ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+                                                        priv->token, &qos_cfg);
+                               if (ret < 0) {
+                                       DPAA2_PMD_ERR(
+                                       "Distribution cannot be configured (%d)",
+                                       ret);
+                                       return -1;
+                               }
+                       }
+                       if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+                               if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
+                                               (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
+                                       DPAA2_PMD_ERR(
+                                       "Unable to prepare extract parameters");
+                                       return -1;
+                               }
+
+                               memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+                               tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
+                               tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
+                               tc_cfg.key_cfg_iova =
+                                       (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
+                               tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
+                               tc_cfg.fs_cfg.keep_entries = true;
+                               ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
+                                                        priv->token,
+                                                        flow->tc_id, &tc_cfg);
+                               if (ret < 0) {
+                                       DPAA2_PMD_ERR(
+                                       "Distribution cannot be configured (%d)",
+                                       ret);
+                                       return -1;
+                               }
+                       }
+                       /* Configure QoS table first */
+                       memset(&nic_attr, 0, sizeof(struct dpni_attr));
+                       ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
+                                                priv->token, &nic_attr);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Failure to get attribute. dpni@%p err code(%d)\n",
+                               dpni, ret);
+                               return ret;
+                       }
+
+                       action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
+                       index = flow->index + (flow->tc_id * nic_attr.fs_entries);
+                       ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
+                                               priv->token, &flow->rule,
+                                               flow->tc_id, index);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Error in adding entry to QoS table (%d)", ret);
+                               return ret;
+                       }
+
+                       /* Then Configure FS table */
+                       ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
+                                               flow->tc_id, flow->index,
+                                               &flow->rule, &action);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Error in adding entry to FS table(%d)", ret);
+                               return ret;
+                       }
+                       break;
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
+                                                priv->token, &nic_attr);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Failure to get attribute. dpni@%p err code(%d)\n",
+                               dpni, ret);
+                               return ret;
+                       }
+                       rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
+                       for (i = 0; i < (int)rss_conf->queue_num; i++) {
+                               if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
+                                   rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
+                                       DPAA2_PMD_ERR(
+                                       "Queue/Group combination is not supported\n");
+                                       return -ENOTSUP;
+                               }
+                       }
+
+                       flow->action = RTE_FLOW_ACTION_TYPE_RSS;
+                       ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
+                                                               &key_cfg);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Unable to set flow distribution. Please check queue config\n");
+                               return ret;
+                       }
+
+                       /* Allocate DMA'ble memory to write the rules */
+                       param = (size_t)rte_malloc(NULL, 256, 64);
+                       if (!param) {
+                               DPAA2_PMD_ERR("Memory allocation failure\n");
+                               return -1;
+                       }
+
+                       if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
+                               DPAA2_PMD_ERR(
+                               "Unable to prepare extract parameters");
+                               rte_free((void *)param);
+                               return -1;
+                       }
+
+                       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+                       tc_cfg.dist_size = rss_conf->queue_num;
+                       tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+                       tc_cfg.key_cfg_iova = (size_t)param;
+                       tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
+
+                       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
+                                                priv->token, flow->tc_id,
+                                                &tc_cfg);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Distribution cannot be configured: %d\n", ret);
+                               rte_free((void *)param);
+                               return -1;
+                       }
+
+                       rte_free((void *)param);
+                       if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+                               if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
+                                       (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
+                                       DPAA2_PMD_ERR(
+                                       "Unable to prepare extract parameters");
+                                       return -1;
+                               }
+                               memset(&qos_cfg, 0,
+                                       sizeof(struct dpni_qos_tbl_cfg));
+                               qos_cfg.discard_on_miss = true;
+                               qos_cfg.keep_entries = true;
+                               qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
+                               ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+                                                        priv->token, &qos_cfg);
+                               if (ret < 0) {
+                                       DPAA2_PMD_ERR(
+                                       "Distribution cannot be configured (%d)\n",
+                                       ret);
+                                       return -1;
+                               }
+                       }
+
+                       /* Add Rule into QoS table */
+                       index = flow->index + (flow->tc_id * nic_attr.fs_entries);
+                       ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+                                               &flow->rule, flow->tc_id,
+                                               index);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                               "Error in entry addition in QoS table(%d)",
+                               ret);
+                               return ret;
+                       }
+                       break;
+               case RTE_FLOW_ACTION_TYPE_END:
+                       end_of_list = 1;
+                       break;
+               default:
+                       DPAA2_PMD_ERR("Invalid action type");
+                       ret = -ENOTSUP;
+                       break;
+               }
+               j++;
+       }
+
+       return ret;
+}
+
+static inline int
+dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
+                     const struct rte_flow_attr *attr)
+{
+       int ret = 0;
+
+       if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
+               DPAA2_PMD_ERR("Priority group is out of range\n");
+               ret = -ENOTSUP;
+       }
+       if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
+               DPAA2_PMD_ERR("Priority within the group is out of range\n");
+               ret = -ENOTSUP;
+       }
+       if (unlikely(attr->egress)) {
+               DPAA2_PMD_ERR(
+                       "Flow configuration is not supported on egress side\n");
+               ret = -ENOTSUP;
+       }
+       if (unlikely(!attr->ingress)) {
+               DPAA2_PMD_ERR("Ingress flag must be configured\n");
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+static inline void
+dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
+{
+       switch (pattern->type) {
+       case RTE_FLOW_ITEM_TYPE_ETH:
+               default_mask = (const void *)&rte_flow_item_eth_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_VLAN:
+               default_mask = (const void *)&rte_flow_item_vlan_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_IPV4:
+               default_mask = (const void *)&rte_flow_item_ipv4_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_IPV6:
+               default_mask = (const void *)&rte_flow_item_ipv6_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_ICMP:
+               default_mask = (const void *)&rte_flow_item_icmp_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_UDP:
+               default_mask = (const void *)&rte_flow_item_udp_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_TCP:
+               default_mask = (const void *)&rte_flow_item_tcp_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_SCTP:
+               default_mask = (const void *)&rte_flow_item_sctp_mask;
+               break;
+       case RTE_FLOW_ITEM_TYPE_GRE:
+               default_mask = (const void *)&rte_flow_item_gre_mask;
+               break;
+       default:
+               DPAA2_PMD_ERR("Invalid pattern type");
+       }
+}
+
+static inline int
+dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
+                         const struct rte_flow_item pattern[])
+{
+       unsigned int i, j, k, is_found = 0;
+       int ret = 0;
+
+       for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
+               for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
+                       if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
+                               is_found = 1;
+                               break;
+                       }
+               }
+               if (!is_found) {
+                       ret = -ENOTSUP;
+                       break;
+               }
+       }
+       /* Let's verify other combinations of given pattern rules */
+       for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
+               if (!pattern[j].spec) {
+                       ret = -EINVAL;
+                       break;
+               }
+               if ((pattern[j].last) && (!pattern[j].mask))
+                       dpaa2_dev_update_default_mask(&pattern[j]);
+       }
+
+       /* DPAA2 platform has a limitation that extract parameter cannot be
+        * more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.
+        */
+       for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
+               for (j = 0; j < MAX_TCS + 1; j++) {
+                       for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
+                               if (dev_priv->pattern[j].pattern_type[k] ==
+                                   pattern[i].type)
+                                       break;
+                       }
+                       if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
+                               ret = -ENOTSUP;
+               }
+       }
+       return ret;
+}
+
+static inline int
+dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
+{
+       unsigned int i, j, is_found = 0;
+       int ret = 0;
+
+       for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
+               for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
+                       if (dpaa2_supported_action_type[i] == actions[j].type) {
+                               is_found = 1;
+                               break;
+                       }
+               }
+               if (!is_found) {
+                       ret = -ENOTSUP;
+                       break;
+               }
+       }
+       for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
+               if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
+                       ret = -EINVAL;
+       }
+       return ret;
+}
+
+static
+int dpaa2_flow_validate(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *flow_attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error __rte_unused)
+{
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct dpni_attr dpni_attr;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+       uint16_t token = priv->token;
+       int ret = 0;
+
+       memset(&dpni_attr, 0, sizeof(struct dpni_attr));
+       ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
+       if (ret < 0) {
+               DPAA2_PMD_ERR(
+                       "Failure to get dpni@%p attribute, err code %d\n",
+                       dpni, ret);
+               return ret;
+       }
+
+       /* Verify input attributes */
+       ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
+       if (ret < 0) {
+               DPAA2_PMD_ERR(
+                       "Invalid attributes are given\n");
+               goto not_valid_params;
+       }
+       /* Verify input pattern list */
+       ret = dpaa2_dev_verify_patterns(priv, pattern);
+       if (ret < 0) {
+               DPAA2_PMD_ERR(
+                       "Invalid pattern list is given\n");
+               goto not_valid_params;
+       }
+       /* Verify input action list */
+       ret = dpaa2_dev_verify_actions(actions);
+       if (ret < 0) {
+               DPAA2_PMD_ERR(
+                       "Invalid action list is given\n");
+               goto not_valid_params;
+       }
+not_valid_params:
+       return ret;
+}
+
+static
+struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
+                                  const struct rte_flow_attr *attr,
+                                  const struct rte_flow_item pattern[],
+                                  const struct rte_flow_action actions[],
+                                  struct rte_flow_error *error)
+{
+       struct rte_flow *flow = NULL;
+       size_t key_iova = 0, mask_iova = 0;
+       int ret;
+
+       flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
+       if (!flow) {
+               DPAA2_PMD_ERR("Failure to allocate memory for flow");
+               return NULL;
+       }
+       /* Allocate DMA'ble memory to write the rules */
+       key_iova = (size_t)rte_malloc(NULL, 256, 64);
+       if (!key_iova) {
+               DPAA2_PMD_ERR(
+                       "Memory allocation failure for rule configuration\n");
+               goto creation_error;
+       }
+       mask_iova = (size_t)rte_malloc(NULL, 256, 64);
+       if (!mask_iova) {
+               DPAA2_PMD_ERR(
+                       "Memory allocation failure for rule configuration\n");
+               goto creation_error;
+       }
+
+       flow->rule.key_iova = key_iova;
+       flow->rule.mask_iova = mask_iova;
+       flow->rule.key_size = 0;
+
+       switch (dpaa2_filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
+               ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
+                                            actions, error);
+               if (ret < 0) {
+                       DPAA2_PMD_ERR(
+                       "Failure to create flow, return code (%d)", ret);
+                       goto creation_error;
+               }
+               break;
+       default:
+               DPAA2_PMD_ERR("Filter type (%d) not supported",
+               dpaa2_filter_type);
+               break;
+       }
+
+       return flow;
+
+creation_error:
+       if (flow)
+               rte_free((void *)flow);
+       if (key_iova)
+               rte_free((void *)key_iova);
+       if (mask_iova)
+               rte_free((void *)mask_iova);
+       return NULL;
+}
+
+static
+int dpaa2_flow_destroy(struct rte_eth_dev *dev,
+                      struct rte_flow *flow,
+                      struct rte_flow_error *error __rte_unused)
+{
+       int ret = 0;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+       switch (flow->action) {
+       case RTE_FLOW_ACTION_TYPE_QUEUE:
+               /* Remove entry from QoS table first */
+               ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+                                          &flow->rule);
+               if (ret < 0) {
+                       DPAA2_PMD_ERR(
+                               "Error in removing entry from QoS table (%d)", ret);
+                       goto error;
+               }
+
+               /* Then remove entry from FS table */
+               ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
+                                          flow->tc_id, &flow->rule);
+               if (ret < 0) {
+                       DPAA2_PMD_ERR(
+                               "Error in removing entry from FS table (%d)", ret);
+                       goto error;
+               }
+               break;
+       case RTE_FLOW_ACTION_TYPE_RSS:
+               ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+                                          &flow->rule);
+               if (ret < 0) {
+                       DPAA2_PMD_ERR(
+                       "Error in removing entry from QoS table (%d)", ret);
+                       goto error;
+               }
+               break;
+       default:
+               DPAA2_PMD_ERR(
+               "Action type (%d) is not supported", flow->action);
+               ret = -ENOTSUP;
+               break;
+       }
+
+       /* Now free the flow */
+       rte_free(flow);
+
+error:
+       return ret;
+}
+
+static int
+dpaa2_flow_flush(struct rte_eth_dev *dev,
+                struct rte_flow_error *error __rte_unused)
+{
+       int ret = 0, tc_id;
+       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpni_qos_tbl_cfg qos_cfg;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+       /* Reset QoS table */
+       qos_cfg.default_tc = 0;
+       qos_cfg.discard_on_miss = false;
+       qos_cfg.keep_entries = false;
+       qos_cfg.key_cfg_iova = priv->extract.qos_extract_param;
+       ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, priv->token, &qos_cfg);
+       if (ret < 0)
+               DPAA2_PMD_ERR(
+                       "QoS table is not reset to default: %d\n", ret);
+
+       for (tc_id = 0; tc_id < priv->num_rx_tc; tc_id++) {
+               /* Reset FS table */
+               memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+               ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token,
+                                        tc_id, &tc_cfg);
+               if (ret < 0)
+                       DPAA2_PMD_ERR(
+                       "Error (%d) in flushing entries for TC (%d)",
+                       ret, tc_id);
+       }
+       return ret;
+}
+
+static int
+dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
+               struct rte_flow *flow __rte_unused,
+               const struct rte_flow_action *actions __rte_unused,
+               void *data __rte_unused,
+               struct rte_flow_error *error __rte_unused)
+{
+       return 0;
+}
+
+const struct rte_flow_ops dpaa2_flow_ops = {
+       .create = dpaa2_flow_create,
+       .validate = dpaa2_flow_validate,
+       .destroy = dpaa2_flow_destroy,
+       .flush  = dpaa2_flow_flush,
+       .query  = dpaa2_flow_query,
+};
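With dpaa2_flow_ops registered, applications drive this code through the standard rte_flow API. The following is a minimal sketch, not part of this patch, of steering ingress UDP traffic with a given destination port to a fixed Rx queue; the port id, queue index and UDP port value are illustrative only. Note that this PMD maps attr.group to a traffic class and attr.priority to the entry index within that class, and its validate path expects a spec for every pattern item.

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative helper: steer UDP dst port 0x0868 arriving on 'port_id' to Rx queue 1 */
static int
example_steer_udp(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow *flow;
	struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
	struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(0x0868) };
	struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first so unsupported pattern/action combinations are rejected early */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return -1;
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	return flow ? 0 : -1;
}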
index 0907a36..6c12a0a 100644 (file)
@@ -1528,6 +1528,248 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
        return mc_send_command(mc_io, &cmd);
 }
 
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
+ *          prepare the key_cfg_iova parameter.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+                      uint32_t cmd_flags,
+                      uint16_t token,
+                      const struct dpni_qos_tbl_cfg *cfg)
+{
+       struct dpni_cmd_set_qos_table *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+       cmd_params->default_tc = cfg->default_tc;
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+       dpni_set_field(cmd_params->discard_on_miss,
+                      ENABLE,
+                      cfg->discard_on_miss);
+       dpni_set_field(cmd_params->discard_on_miss,
+                                       KEEP_QOS_ENTRIES,
+                              cfg->keep_entries);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
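As the comment above warns, the key composition must be serialized with dpkg_prepare_key_cfg() before being handed to the MC. Below is a hedged sketch of that prepare-then-set sequence; the one-field key, the 256-byte buffer and the 'dpni'/'token' parameters mirror how dpaa2_generic_flow_set() uses this API, but the helper itself is hypothetical and assumes the driver's usual headers (fsl_dpni.h, fsl_dpkg.h, rte_malloc.h, string.h, errno.h).

/* Hypothetical helper: program a QoS key that classifies on the IP protocol field */
static int
example_set_qos_key(struct fsl_mc_io *dpni, uint16_t token)
{
	struct dpkg_profile_cfg kg_cfg;
	struct dpni_qos_tbl_cfg qos_cfg;
	uint8_t *param;
	int ret;

	/* 256-byte, 64-byte aligned DMA'ble buffer for the serialized key config */
	param = rte_malloc(NULL, 256, 64);
	if (!param)
		return -ENOMEM;

	memset(&kg_cfg, 0, sizeof(kg_cfg));
	kg_cfg.num_extracts = 1;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;

	ret = dpkg_prepare_key_cfg(&kg_cfg, param);
	if (ret < 0)
		goto out;

	memset(&qos_cfg, 0, sizeof(qos_cfg));
	qos_cfg.default_tc = 0;
	qos_cfg.discard_on_miss = false;
	qos_cfg.keep_entries = true;
	/* The driver passes the buffer's virtual address as the IOVA */
	qos_cfg.key_cfg_iova = (uint64_t)(size_t)param;
	ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, token, &qos_cfg);
out:
	rte_free(param);
	return ret;
}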
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       QoS rule to add
+ * @tc_id:     Traffic class selection (0-7)
+ * @index:     Location in the QoS table where to insert the entry.
+ *             Only relevant if MASKING is enabled for QoS classification on
+ *             this DPNI, it is ignored for exact match.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+                      uint32_t cmd_flags,
+                      uint16_t token,
+                      const struct dpni_rule_cfg *cfg,
+                      uint8_t tc_id,
+                      uint16_t index)
+{
+       struct dpni_cmd_add_qos_entry *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->index = cpu_to_le16(index);
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg:       QoS rule to remove
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+                         uint32_t cmd_flags,
+                         uint16_t token,
+                         const struct dpni_rule_cfg *cfg)
+{
+       struct dpni_cmd_remove_qos_entry *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_qos_table() - Clear all QoS mapping entries
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ *
+ * Following this function call, all frames are directed to
+ * the default traffic class (0)
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+                        uint32_t cmd_flags,
+                        uint16_t token)
+{
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
+                                         cmd_flags,
+                                         token);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ *                     (to select a flow ID)
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ * @index:     Location in the FS table where to insert the entry.
+ *             Only relevant if MASKING is enabled for FS classification
+ *             on this DPNI, it is ignored for exact match.
+ * @cfg:       Flow steering rule to add
+ * @action:    Action to be taken as result of a classification hit
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+                     uint32_t cmd_flags,
+                     uint16_t token,
+                     uint8_t tc_id,
+                     uint16_t index,
+                     const struct dpni_rule_cfg *cfg,
+                     const struct dpni_fs_action_cfg *action)
+{
+       struct dpni_cmd_add_fs_entry *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->index = cpu_to_le16(index);
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+       cmd_params->options = cpu_to_le16(action->options);
+       cmd_params->flow_id = cpu_to_le16(action->flow_id);
+       cmd_params->flc = cpu_to_le64(action->flc);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
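Once a key has been programmed, individual steering rules are inserted with dpni_add_fs_entry(). A hedged sketch follows, built only from the fields shown in this file (key_iova/mask_iova/key_size of dpni_rule_cfg and flow_id of dpni_fs_action_cfg); the buffer contents and the tc/index/flow values are placeholders.

/* Hypothetical: add an exact-match FS entry on TC 0 steering hits to flow (queue) 2 */
static int
example_add_fs_rule(struct fsl_mc_io *dpni, uint16_t token,
		    uint64_t key_iova, uint64_t mask_iova, uint8_t key_size)
{
	struct dpni_rule_cfg rule;
	struct dpni_fs_action_cfg action;

	rule.key_iova = key_iova;    /* DMA'ble buffer holding the match key */
	rule.mask_iova = mask_iova;  /* DMA'ble buffer holding the match mask */
	rule.key_size = key_size;

	memset(&action, 0, sizeof(action));
	action.flow_id = 2;          /* flow id within the traffic class */

	return dpni_add_fs_entry(dpni, CMD_PRI_LOW, token,
				 0 /* tc_id */, 0 /* index */, &rule, &action);
}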
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ *                     traffic class
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ * @cfg:       Flow steering rule to remove
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+                        uint32_t cmd_flags,
+                        uint16_t token,
+                        uint8_t tc_id,
+                        const struct dpni_rule_cfg *cfg)
+{
+       struct dpni_cmd_remove_fs_entry *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+       cmd_params->tc_id = tc_id;
+       cmd_params->key_size = cfg->key_size;
+       cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+       cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
+ *                     traffic class
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @tc_id:     Traffic class selection (0-7)
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
+                         uint32_t cmd_flags,
+                         uint16_t token,
+                         uint8_t tc_id)
+{
+       struct dpni_cmd_clear_fs_entries *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_clear_fs_entries *)cmd.params;
+       cmd_params->tc_id = tc_id;
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
+
 /**
  * dpni_set_congestion_notification() - Set traffic class congestion
  *     notification configuration
@@ -2064,6 +2306,76 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
        return 0;
 }
 
+/**
+ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg: Distribution configuration
+ * If the FS is already enabled with a previous call, the classification
+ *             key will be changed but all the table rules are kept. If the
+ *             existing rules do not match the key, the results will not be
+ *             predictable. It is the user's responsibility to maintain key integrity.
+ * If cfg.enable is set to 1 the command will create a flow steering table
+ *             and will classify packets according to this table. The packets
+ *             that miss all the table rules will be classified according to
+ *             settings made in dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0 the command will clear the flow steering table.
+ *             The packets will be classified according to settings made in
+ *             dpni_set_rx_hash_dist().
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+               uint16_t token, const struct dpni_rx_dist_cfg *cfg)
+{
+       struct dpni_cmd_set_rx_fs_dist *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+       cmd_params->dist_size   = cpu_to_le16(cfg->dist_size);
+       dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+       cmd_params->tc = cfg->tc;
+       cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+       /* send command to mc*/
+       return mc_send_command(mc_io, &cmd);
+}
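A hedged sketch of enabling FS distribution, using only the fields the command marshalling above actually consumes (dist_size, enable, tc, fs_miss_flow_id, key_cfg_iova); struct dpni_rx_dist_cfg is declared in fsl_dpni.h and the values here are placeholders.

/* Hypothetical: enable flow steering on TC 0 across 4 queues, misses go to flow 0 */
static int
example_enable_fs_dist(struct fsl_mc_io *dpni, uint16_t token,
		       uint64_t key_cfg_iova)
{
	struct dpni_rx_dist_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.enable = 1;
	cfg.tc = 0;
	cfg.dist_size = 4;
	cfg.fs_miss_flow_id = 0;
	cfg.key_cfg_iova = key_cfg_iova; /* prepared with dpkg_prepare_key_cfg() */

	return dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW, token, &cfg);
}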
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPNI object
+ * @cfg: Distribution configuration
+ * If cfg.enable is set to 1 the packets will be classified using a hash
+ *             function based on the key received in the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0 the packets will be sent to the queue configured in
+ *             the dpni_set_rx_dist_default_queue() call.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+               uint16_t token, const struct dpni_rx_dist_cfg *cfg)
+{
+       struct dpni_cmd_set_rx_hash_dist *cmd_params;
+       struct mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+       cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+       dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+       cmd_params->tc_id = cfg->tc;
+       cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
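
Similarly, a hedged sketch of the complementary hash path: enabling it spreads TC 0 traffic across 8 queues by hash, and FS-table misses fall back to it as the comments above describe; with enable set to 0 traffic goes to the default queue instead. The helper name is again illustrative only:

static int example_enable_hash(struct fsl_mc_io *mc_io, uint16_t token,
			       uint64_t key_cfg_iova)
{
	struct dpni_rx_dist_cfg cfg = {
		.dist_size = 8,
		.key_cfg_iova = key_cfg_iova,	/* prepared as in the FS example */
		.enable = 1,
		.tc = 0,
	};

	return dpni_set_rx_hash_dist(mc_io, CMD_PRI_LOW, token, &cfg);
}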
+
 /**
  * dpni_add_custom_tpid() - Configures a distinct Ethertype value
  *             (or TPID value) to indicate VLAN tag in addition to the common
index 0359a2b..aecdc8d 100644 (file)
@@ -1071,6 +1071,123 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
                                  uint16_t token,
                                  enum dpni_confirmation_mode *mode);
 
+/**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *             key extractions to be used as the QoS criteria by calling
+ *             dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ *             '0' to use the 'default_tc' in such cases
+ * @keep_entries: if set to one, existing table entries are not deleted. This
+ *             option works properly only for DPNI objects created with the
+ *             DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must
+ *             be compatible with the new key composition rule; it is the
+ *             caller's job to delete incompatible entries before executing
+ *             this function.
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+       uint64_t key_cfg_iova;
+       int discard_on_miss;
+       int keep_entries;
+       uint8_t default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+                      uint32_t cmd_flags,
+                      uint16_t token,
+                      const struct dpni_qos_tbl_cfg *cfg);
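
For illustration, a minimal (hypothetical) caller of dpni_set_qos_table() might map unmatched frames to traffic class 0 rather than discarding them; qos_key_iova is assumed to point at 256 bytes of DMA-able memory filled via dpkg_prepare_key_cfg() as the comment above requires:

static int example_set_qos(struct fsl_mc_io *mc_io, uint16_t token,
			   uint64_t qos_key_iova)
{
	struct dpni_qos_tbl_cfg qos = {
		.key_cfg_iova = qos_key_iova,
		.discard_on_miss = 0,	/* fall back to default_tc on miss */
		.keep_entries = 0,	/* start with an empty table */
		.default_tc = 0,
	};

	return dpni_set_qos_table(mc_io, CMD_PRI_LOW, token, &qos);
}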
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+       uint64_t key_iova;
+       uint64_t mask_iova;
+       uint8_t key_size;
+};
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+                      uint32_t cmd_flags,
+                      uint16_t token,
+                      const struct dpni_rule_cfg *cfg,
+                      uint8_t tc_id,
+                      uint16_t index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+                         uint32_t cmd_flags,
+                         uint16_t token,
+                         const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+                        uint32_t cmd_flags,
+                        uint16_t token);
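
A QoS rule can then be installed against that key; the sketch below (hypothetical helper, caller-supplied DMA-able key/mask buffers) steers matching frames to traffic class 1 at table index 0:

static int example_add_qos_rule(struct fsl_mc_io *mc_io, uint16_t token,
				uint64_t key_iova, uint64_t mask_iova,
				uint8_t key_size)
{
	struct dpni_rule_cfg rule = {
		.key_iova = key_iova,	/* laid out to match the QoS key */
		.mask_iova = mask_iova,
		.key_size = key_size,
	};

	/* tc_id = 1, index = 0 within the QoS table */
	return dpni_add_qos_entry(mc_io, CMD_PRI_LOW, token, &rule, 1, 0);
}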
+
+/**
+ * Discard matching traffic.  If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD            0x1
+
+/**
+ * Set FLC value.  If set, the flc member of struct dpni_fs_action_cfg is used
+ * to override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC            0x2
+
+/**
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control.  If set, the 6 least significant bits of the FLC value are
+ * interpreted as follows:
+ *     - bits 0-1: indicates the number of 64 byte units of context that are
+ *     stashed.  FLC value is interpreted as a memory address in this case,
+ *     excluding the 6 LS bits.
+ *     - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *     to be stashed.  Annotation is placed at FD[ADDR].
+ *     - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *     stashed.  Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL  0x4
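
The stash-control layout described above can be encoded directly into the low 6 bits of the FLC value; the helper below is only a sketch of that encoding (assuming a 64-byte-aligned context address so the low bits are free):

static uint64_t example_flc_with_stash(uint64_t flc_addr,
				       unsigned int ctx_units,	/* 0..3 */
				       unsigned int annot_units,/* 0..3 */
				       unsigned int data_units)	/* 0..3 */
{
	return (flc_addr & ~0x3FULL) |
	       ((uint64_t)(ctx_units & 0x3)) |
	       ((uint64_t)(annot_units & 0x3) << 2) |
	       ((uint64_t)(data_units & 0x3) << 4);
}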
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule.  Please check the Frame
+ * Descriptor section in the hardware documentation for more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic.  Supported
+ *     values are in range 0 to num_queue-1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+       uint64_t flc;
+       uint16_t flow_id;
+       uint16_t options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+                     uint32_t cmd_flags,
+                     uint16_t token,
+                     uint8_t tc_id,
+                     uint16_t index,
+                     const struct dpni_rule_cfg *cfg,
+                     const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+                        uint32_t cmd_flags,
+                        uint16_t token,
+                        uint8_t tc_id,
+                        const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
+                         uint32_t cmd_flags,
+                         uint16_t token,
+                         uint8_t tc_id);
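
Putting the pieces together, a hypothetical caller could add a flow steering entry on TC 0 that directs matching frames to Rx queue 2; 'rule' is assumed to be prepared as in the QoS sketch above, against the key installed with dpni_set_rx_fs_dist():

static int example_add_fs_rule(struct fsl_mc_io *mc_io, uint16_t token,
			       const struct dpni_rule_cfg *rule)
{
	struct dpni_fs_action_cfg action = {
		.flc = 0,
		.flow_id = 2,	/* Rx queue for matching traffic */
		.options = 0,	/* no DPNI_FS_OPT_ flags */
	};

	/* tc_id = 0, index = 0 within the FS table */
	return dpni_add_fs_entry(mc_io, CMD_PRI_LOW, token, 0, 0, rule, &action);
}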
+
 int dpni_get_api_version(struct fsl_mc_io *mc_io,
                         uint32_t cmd_flags,
                         uint16_t *major_ver,
@@ -1202,6 +1319,43 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
                 struct opr_cfg *cfg,
                 struct opr_qry *qry);
 
+/**
+ * When used as the queue_idx in dpni_set_rx_dist_default_queue(), signals
+ * the DPNI to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP              ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
+ *             12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ *             512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
+ *             with the extractions to be used for the distribution key by
+ *             calling dpkg_prepare_key_cfg(); relevant only when enable != 0,
+ *             otherwise it can be '0'
+ * @enable: enable/disable the distribution
+ * @tc: TC id for which the distribution is set
+ * @fs_miss_flow_id: queue id that receives packets missing all flow steering
+ *             table rules while hash distribution is disabled; use
+ *             DPNI_FS_MISS_DROP to drop such frames. This field is used only
+ *             when flow steering distribution is enabled and hash
+ *             distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+       uint16_t dist_size;
+       uint64_t key_cfg_iova;
+       uint8_t enable;
+       uint8_t tc;
+       uint16_t fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+               uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+               uint16_t token, const struct dpni_rx_dist_cfg *cfg);
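
If unclassified traffic should be dropped rather than queued, the same configuration works with fs_miss_flow_id set to DPNI_FS_MISS_DROP; a brief hypothetical variant of the earlier FS sketch:

static int example_enable_fs_drop_miss(struct fsl_mc_io *mc_io, uint16_t token,
				       uint64_t key_cfg_iova)
{
	struct dpni_rx_dist_cfg cfg = {
		.dist_size = 8,
		.key_cfg_iova = key_cfg_iova,
		.enable = 1,
		.tc = 0,
		.fs_miss_flow_id = DPNI_FS_MISS_DROP,	/* discard on miss */
	};

	return dpni_set_rx_fs_dist(mc_io, CMD_PRI_LOW, token, &cfg);
}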
+
 int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
                uint16_t token, uint16_t tpid);
 
index 81830ed..9116e41 100644 (file)
 
 #define DPNI_CMDID_SET_RX_TC_DIST              DPNI_CMD_V3(0x235)
 
+#define DPNI_CMDID_SET_QOS_TBL                 DPNI_CMD_V2(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT                 DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT              DPNI_CMD(0x242)
+#define DPNI_CMDID_CLR_QOS_TBL                 DPNI_CMD(0x243)
+#define DPNI_CMDID_ADD_FS_ENT                  DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT               DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT                  DPNI_CMD(0x246)
+
 #define DPNI_CMDID_GET_STATISTICS              DPNI_CMD_V2(0x25D)
 #define DPNI_CMDID_RESET_STATISTICS            DPNI_CMD(0x25E)
 #define DPNI_CMDID_GET_QUEUE                   DPNI_CMD(0x25F)
@@ -91,6 +99,8 @@
 #define DPNI_CMDID_GET_TX_CONFIRMATION_MODE    DPNI_CMD(0x26D)
 #define DPNI_CMDID_SET_OPR                     DPNI_CMD(0x26e)
 #define DPNI_CMDID_GET_OPR                     DPNI_CMD(0x26f)
+#define DPNI_CMDID_SET_RX_FS_DIST              DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST            DPNI_CMD(0x274)
 #define DPNI_CMDID_ADD_CUSTOM_TPID             DPNI_CMD(0x275)
 #define DPNI_CMDID_REMOVE_CUSTOM_TPID          DPNI_CMD(0x276)
 #define DPNI_CMDID_GET_CUSTOM_TPID             DPNI_CMD(0x277)
@@ -495,6 +505,63 @@ struct dpni_cmd_set_queue {
        uint64_t user_context;
 };
 
+#define DPNI_DISCARD_ON_MISS_SHIFT     0
+#define DPNI_DISCARD_ON_MISS_SIZE      1
+#define DPNI_KEEP_QOS_ENTRIES_SHIFT    1
+#define DPNI_KEEP_QOS_ENTRIES_SIZE     1
+
+struct dpni_cmd_set_qos_table {
+       uint32_t pad;
+       uint8_t default_tc;
+       /* bit 0: discard_on_miss, bit 1: keep_entries (see shifts above) */
+       uint8_t discard_on_miss;
+       uint16_t pad1[21];
+       uint64_t key_cfg_iova;
+};
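
For reference, both flags are presumably packed into the single discard_on_miss byte using the shift/size pairs above; a sketch (hypothetical helper name) of how dpni_set_qos_table() in dpni.c is expected to fill them:

static void example_fill_qos_flags(struct dpni_cmd_set_qos_table *cmd_params,
				   const struct dpni_qos_tbl_cfg *cfg)
{
	dpni_set_field(cmd_params->discard_on_miss, DISCARD_ON_MISS,
		       cfg->discard_on_miss);
	dpni_set_field(cmd_params->discard_on_miss, KEEP_QOS_ENTRIES,
		       cfg->keep_entries);
}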
+
+struct dpni_cmd_add_qos_entry {
+       uint16_t pad;
+       uint8_t tc_id;
+       uint8_t key_size;
+       uint16_t index;
+       uint16_t pad2;
+       uint64_t key_iova;
+       uint64_t mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+       uint8_t pad1[3];
+       uint8_t key_size;
+       uint32_t pad2;
+       uint64_t key_iova;
+       uint64_t mask_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+       uint16_t options;
+       uint8_t tc_id;
+       uint8_t key_size;
+       uint16_t index;
+       uint16_t flow_id;
+       uint64_t key_iova;
+       uint64_t mask_iova;
+       uint64_t flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+       uint16_t pad1;
+       uint8_t tc_id;
+       uint8_t key_size;
+       uint32_t pad2;
+       uint64_t key_iova;
+       uint64_t mask_iova;
+};
+
+struct dpni_cmd_clear_fs_entries {
+       uint16_t pad;
+       uint8_t tc_id;
+};
+
 #define DPNI_DROP_ENABLE_SHIFT 0
 #define DPNI_DROP_ENABLE_SIZE  1
 #define DPNI_DROP_UNITS_SHIFT  2
@@ -692,5 +759,26 @@ struct dpni_rsp_get_custom_tpid {
        uint16_t        tpid2;
 };
 
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT   0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE    1
+struct dpni_cmd_set_rx_fs_dist {
+       uint16_t        dist_size;
+       uint8_t         enable;
+       uint8_t         tc;
+       uint16_t        miss_flow_id;
+       uint16_t        pad1;
+       uint64_t        key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE  1
+struct dpni_cmd_set_rx_hash_dist {
+       uint16_t        dist_size;
+       uint8_t         enable;
+       uint8_t         tc_id;
+       uint32_t        pad;
+       uint64_t        key_cfg_iova;
+};
+
 #pragma pack(pop)
 #endif /* _FSL_DPNI_CMD_H */
index 801cbf5..53e1d81 100644 (file)
@@ -11,6 +11,7 @@ deps += ['mempool_dpaa2']
 sources = files('base/dpaa2_hw_dpni.c',
                'dpaa2_mux.c',
                'dpaa2_ethdev.c',
+               'dpaa2_flow.c',
                'dpaa2_rxtx.c',
                'mc/dpkg.c',
                'mc/dpdmux.c',