net/dpaa2: add Tx/Rx burst mode info
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 9a449ca..8430213 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
  */
 int mc_l4_port_identification;
 
+static char *dpaa2_flow_control_log;
+static int dpaa2_flow_miss_flow_id =
+       DPNI_FS_MISS_DROP;
+
+#define FIXED_ENTRY_SIZE 54
+
 enum flow_rule_ipaddr_type {
        FLOW_NONE_IPADDR,
        FLOW_IPV4_ADDR,
@@ -47,14 +53,11 @@ struct rte_flow {
        LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
        struct dpni_rule_cfg qos_rule;
        struct dpni_rule_cfg fs_rule;
-       uint16_t qos_index;
-       uint16_t fs_index;
-       uint8_t key_size;
+       uint8_t qos_real_key_size;
+       uint8_t fs_real_key_size;
        uint8_t tc_id; /** Traffic Class ID. */
-       uint8_t flow_type;
        uint8_t tc_index; /** index within this Traffic Class. */
        enum rte_flow_action_type action;
-       uint16_t flow_id;
        /* Special for IP address to specify the offset
         * in key/mask.
         */
@@ -149,6 +152,189 @@ static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
 
 #endif
 
+static inline void dpaa2_prot_field_string(
+       enum net_prot prot, uint32_t field,
+       char *string)
+{
+       if (!dpaa2_flow_control_log)
+               return;
+
+       if (prot == NET_PROT_ETH) {
+               strcpy(string, "eth");
+               if (field == NH_FLD_ETH_DA)
+                       strcat(string, ".dst");
+               else if (field == NH_FLD_ETH_SA)
+                       strcat(string, ".src");
+               else if (field == NH_FLD_ETH_TYPE)
+                       strcat(string, ".type");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_VLAN) {
+               strcpy(string, "vlan");
+               if (field == NH_FLD_VLAN_TCI)
+                       strcat(string, ".tci");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_IP) {
+               strcpy(string, "ip");
+               if (field == NH_FLD_IP_SRC)
+                       strcat(string, ".src");
+               else if (field == NH_FLD_IP_DST)
+                       strcat(string, ".dst");
+               else if (field == NH_FLD_IP_PROTO)
+                       strcat(string, ".proto");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_TCP) {
+               strcpy(string, "tcp");
+               if (field == NH_FLD_TCP_PORT_SRC)
+                       strcat(string, ".src");
+               else if (field == NH_FLD_TCP_PORT_DST)
+                       strcat(string, ".dst");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_UDP) {
+               strcpy(string, "udp");
+               if (field == NH_FLD_UDP_PORT_SRC)
+                       strcat(string, ".src");
+               else if (field == NH_FLD_UDP_PORT_DST)
+                       strcat(string, ".dst");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_ICMP) {
+               strcpy(string, "icmp");
+               if (field == NH_FLD_ICMP_TYPE)
+                       strcat(string, ".type");
+               else if (field == NH_FLD_ICMP_CODE)
+                       strcat(string, ".code");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_SCTP) {
+               strcpy(string, "sctp");
+               if (field == NH_FLD_SCTP_PORT_SRC)
+                       strcat(string, ".src");
+               else if (field == NH_FLD_SCTP_PORT_DST)
+                       strcat(string, ".dst");
+               else
+                       strcat(string, ".unknown field");
+       } else if (prot == NET_PROT_GRE) {
+               strcpy(string, "gre");
+               if (field == NH_FLD_GRE_TYPE)
+                       strcat(string, ".type");
+               else
+                       strcat(string, ".unknown field");
+       } else {
+               strcpy(string, "unknown protocol");
+       }
+}
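
A minimal sketch of how this helper can be exercised, assuming flow-control logging is enabled (otherwise it returns early); the NET_PROT_*/NH_FLD_* constants are the same FSL net-header definitions used above, and the 32-byte buffer mirrors the callers below:

	/* Illustrative only: format protocol/field pairs for the log helpers. */
	char string[32];

	dpaa2_prot_field_string(NET_PROT_IP, NH_FLD_IP_SRC, string);
	printf("%s\r\n", string);		/* "ip.src" */

	dpaa2_prot_field_string(NET_PROT_UDP, NH_FLD_UDP_PORT_DST, string);
	printf("%s\r\n", string);		/* "udp.dst" */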
+
+static inline void dpaa2_flow_qos_table_extracts_log(
+       const struct dpaa2_dev_priv *priv)
+{
+       int idx;
+       char string[32];
+
+       if (!dpaa2_flow_control_log)
+               return;
+
+       printf("Setup QoS table: number of extracts: %d\r\n",
+                       priv->extract.qos_key_extract.dpkg.num_extracts);
+       for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
+               idx++) {
+               dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
+                       .extracts[idx].extract.from_hdr.prot,
+                       priv->extract.qos_key_extract.dpkg.extracts[idx]
+                       .extract.from_hdr.field,
+                       string);
+               printf("%s", string);
+               if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
+                       printf(" / ");
+       }
+       printf("\r\n");
+}
+
+static inline void dpaa2_flow_fs_table_extracts_log(
+       const struct dpaa2_dev_priv *priv, int tc_id)
+{
+       int idx;
+       char string[32];
+
+       if (!dpaa2_flow_control_log)
+               return;
+
+       printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
+                       tc_id, priv->extract.tc_key_extract[tc_id]
+                       .dpkg.num_extracts);
+       for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
+               .dpkg.num_extracts; idx++) {
+               dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
+                       .dpkg.extracts[idx].extract.from_hdr.prot,
+                       priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
+                       .extract.from_hdr.field,
+                       string);
+               printf("%s", string);
+               if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
+                       .dpkg.num_extracts)
+                       printf(" / ");
+       }
+       printf("\r\n");
+}
+
+static inline void dpaa2_flow_qos_entry_log(
+       const char *log_info, const struct rte_flow *flow, int qos_index)
+{
+       int idx;
+       uint8_t *key, *mask;
+
+       if (!dpaa2_flow_control_log)
+               return;
+
+       printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
+               log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
+
+       key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
+       mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
+
+       printf("key:\r\n");
+       for (idx = 0; idx < flow->qos_real_key_size; idx++)
+               printf("%02x ", key[idx]);
+
+       printf("\r\nmask:\r\n");
+       for (idx = 0; idx < flow->qos_real_key_size; idx++)
+               printf("%02x ", mask[idx]);
+
+       printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
+               flow->ipaddr_rule.qos_ipsrc_offset,
+               flow->ipaddr_rule.qos_ipdst_offset);
+}
+
+static inline void dpaa2_flow_fs_entry_log(
+       const char *log_info, const struct rte_flow *flow)
+{
+       int idx;
+       uint8_t *key, *mask;
+
+       if (!dpaa2_flow_control_log)
+               return;
+
+       printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
+               log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
+
+       key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
+       mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
+
+       printf("key:\r\n");
+       for (idx = 0; idx < flow->fs_real_key_size; idx++)
+               printf("%02x ", key[idx]);
+
+       printf("\r\nmask:\r\n");
+       for (idx = 0; idx < flow->fs_real_key_size; idx++)
+               printf("%02x ", mask[idx]);
+
+       printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
+               flow->ipaddr_rule.fs_ipsrc_offset,
+               flow->ipaddr_rule.fs_ipdst_offset);
+}
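
With DPAA2_FLOW_CONTROL_LOG set, the entry-log helpers above dump the raw key/mask bytes of a rule; the byte values below are invented purely to show the shape of the output:

	Before update QoS entry[0] for TC[0], extracts size is 6
	key:
	08 00 c0 a8 00 01
	mask:
	ff ff ff ff ff ff
	Before update QoS ipsrc: 2, ipdst: -1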
 
 static inline void dpaa2_flow_extract_key_set(
        struct dpaa2_key_info *key_info, int index, uint8_t size)
@@ -307,6 +493,42 @@ static int dpaa2_flow_extract_add(
        return 0;
 }
 
+static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
+                                     int size)
+{
+       struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
+       struct dpaa2_key_info *key_info = &key_extract->key_info;
+       int last_extract_size, index;
+
+       if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
+           DPKG_EXTRACT_FROM_DATA) {
+               DPAA2_PMD_WARN("RAW extract cannot be combined with others");
+               return -1;
+       }
+
+       last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
+       dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
+       if (last_extract_size)
+               dpkg->num_extracts++;
+       else
+               last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
+
+       for (index = 0; index < dpkg->num_extracts; index++) {
+               dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
+               if (index == dpkg->num_extracts - 1)
+                       dpkg->extracts[index].extract.from_data.size =
+                               last_extract_size;
+               else
+                       dpkg->extracts[index].extract.from_data.size =
+                               DPAA2_FLOW_MAX_KEY_SIZE;
+               dpkg->extracts[index].extract.from_data.offset =
+                       DPAA2_FLOW_MAX_KEY_SIZE * index;
+       }
+
+       key_info->key_total_size = size;
+       return 0;
+}
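
A worked example of the splitting logic above, assuming DPAA2_FLOW_MAX_KEY_SIZE is 16 (its definition lies outside this hunk, so treat the value as an assumption):

	/*
	 * size = 54:
	 *   last_extract_size = 54 % 16 = 6
	 *   num_extracts      = 54 / 16 = 3, plus 1 for the remainder = 4
	 *   extract[0]: DPKG_EXTRACT_FROM_DATA, offset  0, size 16
	 *   extract[1]: DPKG_EXTRACT_FROM_DATA, offset 16, size 16
	 *   extract[2]: DPKG_EXTRACT_FROM_DATA, offset 32, size 16
	 *   extract[3]: DPKG_EXTRACT_FROM_DATA, offset 48, size  6
	 *   key_info->key_total_size = 54
	 */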
+
 /* Protocol discrimination.
  * Discriminate IPv4/IPv6/vLan by Eth type.
  * Discriminate UDP/TCP/ICMP by next proto of IP.
@@ -481,6 +703,19 @@ dpaa2_flow_rule_data_set(
                        prot, field);
                return -1;
        }
+
+       memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
+       memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
+
+       return 0;
+}
+
+static inline int
+dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
+                            const void *key, const void *mask, int size)
+{
+       int offset = 0;
+
        memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
        memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
 
@@ -526,9 +761,11 @@ _dpaa2_flow_rule_move_ipaddr_tail(
                len = NH_FLD_IPV6_ADDR_SIZE;
 
        memcpy(tmp, (char *)key_src, len);
+       memset((char *)key_src, 0, len);
        memcpy((char *)key_dst, tmp, len);
 
        memcpy(tmp, (char *)mask_src, len);
+       memset((char *)mask_src, 0, len);
        memcpy((char *)mask_dst, tmp, len);
 
        return 0;
@@ -1096,6 +1333,70 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,
        return 0;
 }
 
+static int
+dpaa2_configure_flow_ip_discrimation(
+       struct dpaa2_dev_priv *priv, struct rte_flow *flow,
+       const struct rte_flow_item *pattern,
+       int *local_cfg, int *device_configured,
+       uint32_t group)
+{
+       int index, ret;
+       struct proto_discrimination proto;
+
+       index = dpaa2_flow_extract_search(
+                       &priv->extract.qos_key_extract.dpkg,
+                       NET_PROT_ETH, NH_FLD_ETH_TYPE);
+       if (index < 0) {
+               ret = dpaa2_flow_proto_discrimination_extract(
+                               &priv->extract.qos_key_extract,
+                               RTE_FLOW_ITEM_TYPE_ETH);
+               if (ret) {
+                       DPAA2_PMD_ERR(
+                       "QoS Extract ETH_TYPE to discriminate IP failed.");
+                       return -1;
+               }
+               (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
+       }
+
+       index = dpaa2_flow_extract_search(
+                       &priv->extract.tc_key_extract[group].dpkg,
+                       NET_PROT_ETH, NH_FLD_ETH_TYPE);
+       if (index < 0) {
+               ret = dpaa2_flow_proto_discrimination_extract(
+                               &priv->extract.tc_key_extract[group],
+                               RTE_FLOW_ITEM_TYPE_ETH);
+               if (ret) {
+                       DPAA2_PMD_ERR(
+                       "FS Extract ETH_TYPE to discriminate IP failed.");
+                       return -1;
+               }
+               (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+       if (ret) {
+               DPAA2_PMD_ERR(
+                       "Move ipaddr before IP discrimination set failed");
+               return -1;
+       }
+
+       proto.type = RTE_FLOW_ITEM_TYPE_ETH;
+       if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
+               proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+       else
+               proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+       ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
+       if (ret) {
+               DPAA2_PMD_ERR("IP discrimination rule set failed");
+               return -1;
+       }
+
+       (*device_configured) |= (*local_cfg);
+
+       return 0;
+}
+
+
 static int
 dpaa2_configure_flow_generic_ip(
        struct rte_flow *flow,
@@ -1139,73 +1440,16 @@ dpaa2_configure_flow_generic_ip(
        flow->tc_id = group;
        flow->tc_index = attr->priority;
 
-       if (!spec_ipv4 && !spec_ipv6) {
-               /* Don't care any field of IP header,
-                * only care IP protocol.
-                * Example: flow create 0 ingress pattern ipv6 /
-                */
-               /* Eth type is actually used for IP identification.
-                */
-               /* TODO: Current design only supports Eth + IP,
-                *  Eth + vLan + IP needs to add.
-                */
-               struct proto_discrimination proto;
-
-               index = dpaa2_flow_extract_search(
-                               &priv->extract.qos_key_extract.dpkg,
-                               NET_PROT_ETH, NH_FLD_ETH_TYPE);
-               if (index < 0) {
-                       ret = dpaa2_flow_proto_discrimination_extract(
-                                       &priv->extract.qos_key_extract,
-                                       RTE_FLOW_ITEM_TYPE_ETH);
-                       if (ret) {
-                               DPAA2_PMD_ERR(
-                               "QoS Ext ETH_TYPE to discriminate IP failed.");
-
-                               return -1;
-                       }
-                       local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
-               }
-
-               index = dpaa2_flow_extract_search(
-                               &priv->extract.tc_key_extract[group].dpkg,
-                               NET_PROT_ETH, NH_FLD_ETH_TYPE);
-               if (index < 0) {
-                       ret = dpaa2_flow_proto_discrimination_extract(
-                                       &priv->extract.tc_key_extract[group],
-                                       RTE_FLOW_ITEM_TYPE_ETH);
-                       if (ret) {
-                               DPAA2_PMD_ERR(
-                               "FS Ext ETH_TYPE to discriminate IP failed");
-
-                               return -1;
-                       }
-                       local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
-               }
-
-               ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
-               if (ret) {
-                       DPAA2_PMD_ERR(
-                       "Move ipaddr before IP discrimination set failed");
-                       return -1;
-               }
-
-               proto.type = RTE_FLOW_ITEM_TYPE_ETH;
-               if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
-                       proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
-               else
-                       proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-               ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
-                                                       proto, group);
-               if (ret) {
-                       DPAA2_PMD_ERR("IP discrimination rule set failed");
-                       return -1;
-               }
-
-               (*device_configured) |= local_cfg;
+       ret = dpaa2_configure_flow_ip_discrimation(priv,
+                       flow, pattern, &local_cfg,
+                       device_configured, group);
+       if (ret) {
+               DPAA2_PMD_ERR("IP discrimination failed!");
+               return -1;
+       }
 
+       if (!spec_ipv4 && !spec_ipv6)
                return 0;
-       }
 
        if (mask_ipv4) {
                if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
@@ -1245,17 +1489,16 @@ dpaa2_configure_flow_generic_ip(
                                NET_PROT_IP, NH_FLD_IP_SRC);
                if (index < 0) {
                        ret = dpaa2_flow_extract_add(
-                                               &priv->extract.qos_key_extract,
-                                               NET_PROT_IP,
-                                               NH_FLD_IP_SRC,
-                                               0);
+                                       &priv->extract.qos_key_extract,
+                                       NET_PROT_IP,
+                                       NH_FLD_IP_SRC,
+                                       0);
                        if (ret) {
                                DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
 
                                return -1;
                        }
-                       local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
-                               DPAA2_QOS_TABLE_IPADDR_EXTRACT);
+                       local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
                }
 
                index = dpaa2_flow_extract_search(
@@ -1272,8 +1515,7 @@ dpaa2_configure_flow_generic_ip(
 
                                return -1;
                        }
-                       local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
-                               DPAA2_FS_TABLE_IPADDR_EXTRACT);
+                       local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
                }
 
                if (spec_ipv4)
@@ -1333,17 +1575,16 @@ dpaa2_configure_flow_generic_ip(
                        else
                                size = NH_FLD_IPV6_ADDR_SIZE;
                        ret = dpaa2_flow_extract_add(
-                                               &priv->extract.qos_key_extract,
-                                               NET_PROT_IP,
-                                               NH_FLD_IP_DST,
-                                               size);
+                                       &priv->extract.qos_key_extract,
+                                       NET_PROT_IP,
+                                       NH_FLD_IP_DST,
+                                       size);
                        if (ret) {
                                DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
 
                                return -1;
                        }
-                       local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
-                               DPAA2_QOS_TABLE_IPADDR_EXTRACT);
+                       local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
                }
 
                index = dpaa2_flow_extract_search(
@@ -1364,8 +1605,7 @@ dpaa2_configure_flow_generic_ip(
 
                                return -1;
                        }
-                       local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
-                               DPAA2_FS_TABLE_IPADDR_EXTRACT);
+                       local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
                }
 
                if (spec_ipv4)
@@ -2622,6 +2862,83 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
        return 0;
 }
 
+static int
+dpaa2_configure_flow_raw(struct rte_flow *flow,
+                        struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item *pattern,
+                        const struct rte_flow_action actions[] __rte_unused,
+                        struct rte_flow_error *error __rte_unused,
+                        int *device_configured)
+{
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       const struct rte_flow_item_raw *spec = pattern->spec;
+       const struct rte_flow_item_raw *mask = pattern->mask;
+       int prev_key_size =
+               priv->extract.qos_key_extract.key_info.key_total_size;
+       int local_cfg = 0, ret;
+       uint32_t group;
+
+       /* Need both spec and mask */
+       if (!spec || !mask) {
+               DPAA2_PMD_ERR("spec or mask not present.");
+               return -EINVAL;
+       }
+       /* Only supports non-relative with offset 0 */
+       if (spec->relative || spec->offset != 0 ||
+           spec->search || spec->limit) {
+               DPAA2_PMD_ERR("relative and non-zero offset not supported.");
+               return -EINVAL;
+       }
+       /* Spec len and mask len should be same */
+       if (spec->length != mask->length) {
+               DPAA2_PMD_ERR("Spec len and mask len mismatch.");
+               return -EINVAL;
+       }
+
+       /* Get traffic class index and flow id to be configured */
+       group = attr->group;
+       flow->tc_id = group;
+       flow->tc_index = attr->priority;
+
+       if (prev_key_size < spec->length) {
+               ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
+                                                spec->length);
+               if (ret) {
+                       DPAA2_PMD_ERR("QoS Extract RAW add failed.");
+                       return -1;
+               }
+               local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+
+               ret = dpaa2_flow_extract_add_raw(
+                                       &priv->extract.tc_key_extract[group],
+                                       spec->length);
+               if (ret) {
+                       DPAA2_PMD_ERR("FS Extract RAW add failed.");
+                       return -1;
+               }
+               local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+       }
+
+       ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
+                                          mask->pattern, spec->length);
+       if (ret) {
+               DPAA2_PMD_ERR("QoS RAW rule data set failed");
+               return -1;
+       }
+
+       ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
+                                          mask->pattern, spec->length);
+       if (ret) {
+               DPAA2_PMD_ERR("FS RAW rule data set failed");
+               return -1;
+       }
+
+       (*device_configured) |= local_cfg;
+
+       return 0;
+}
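
For reference, a RAW pattern item that satisfies the checks above (non-relative, offset 0, matching spec/mask lengths) could be built like this; the pattern bytes are arbitrary and for illustration only:

	/* Illustrative only: a 16-byte non-relative RAW match at offset 0. */
	static const uint8_t raw_data[16] = { 0x00, 0x11, 0x22, 0x33 };
	static const uint8_t raw_msk[16]  = { 0xff, 0xff, 0xff, 0xff };
	const struct rte_flow_item_raw raw_spec = {
		.relative = 0,
		.search = 0,
		.offset = 0,
		.limit = 0,
		.length = sizeof(raw_data),
		.pattern = raw_data,
	};
	const struct rte_flow_item_raw raw_mask = {
		.relative = 0,
		.search = 0,
		.offset = 0,
		.limit = 0,
		.length = sizeof(raw_msk),
		.pattern = raw_msk,
	};
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_RAW,
			.spec = &raw_spec,
			.mask = &raw_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};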
+
 /* The existing QoS/FS entry with IP address(es)
  * needs update after
  * new extract(s) are inserted before IP
@@ -2644,7 +2961,8 @@ dpaa2_flow_entry_update(
        char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
        char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
        char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
-       int extend = -1, extend1, size;
+       int extend = -1, extend1, size = -1;
+       uint16_t qos_index;
 
        while (curr) {
                if (curr->ipaddr_rule.ipaddr_type ==
@@ -2676,11 +2994,18 @@ dpaa2_flow_entry_update(
                        size = NH_FLD_IPV6_ADDR_SIZE;
                }
 
-               ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
-                               priv->token, &curr->qos_rule);
-               if (ret) {
-                       DPAA2_PMD_ERR("Qos entry remove failed.");
-                       return -1;
+               qos_index = curr->tc_id * priv->fs_entries +
+                       curr->tc_index;
+
+               dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
+
+               if (priv->num_rx_tc > 1) {
+                       ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
+                                       priv->token, &curr->qos_rule);
+                       if (ret) {
+                               DPAA2_PMD_ERR("Qos entry remove failed.");
+                               return -1;
+                       }
                }
 
                extend = -1;
@@ -2695,6 +3020,9 @@ dpaa2_flow_entry_update(
                        else
                                extend = extend1;
 
+                       RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+                               (size == NH_FLD_IPV6_ADDR_SIZE));
+
                        memcpy(ipsrc_key,
                                (char *)(size_t)curr->qos_rule.key_iova +
                                curr->ipaddr_rule.qos_ipsrc_offset,
@@ -2724,6 +3052,9 @@ dpaa2_flow_entry_update(
                        else
                                extend = extend1;
 
+                       RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+                               (size == NH_FLD_IPV6_ADDR_SIZE));
+
                        memcpy(ipdst_key,
                                (char *)(size_t)curr->qos_rule.key_iova +
                                curr->ipaddr_rule.qos_ipdst_offset,
@@ -2744,6 +3075,8 @@ dpaa2_flow_entry_update(
                }
 
                if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
+                       RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+                               (size == NH_FLD_IPV6_ADDR_SIZE));
                        memcpy((char *)(size_t)curr->qos_rule.key_iova +
                                curr->ipaddr_rule.qos_ipsrc_offset,
                                ipsrc_key,
@@ -2754,6 +3087,8 @@ dpaa2_flow_entry_update(
                                size);
                }
                if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
+                       RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+                               (size == NH_FLD_IPV6_ADDR_SIZE));
                        memcpy((char *)(size_t)curr->qos_rule.key_iova +
                                curr->ipaddr_rule.qos_ipdst_offset,
                                ipdst_key,
@@ -2765,15 +3100,21 @@ dpaa2_flow_entry_update(
                }
 
                if (extend >= 0)
-                       curr->qos_rule.key_size += extend;
+                       curr->qos_real_key_size += extend;
 
-               ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
-                               priv->token, &curr->qos_rule,
-                               curr->tc_id, curr->qos_index,
-                               0, 0);
-               if (ret) {
-                       DPAA2_PMD_ERR("Qos entry update failed.");
-                       return -1;
+               curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
+
+               dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
+
+               if (priv->num_rx_tc > 1) {
+                       ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
+                                       priv->token, &curr->qos_rule,
+                                       curr->tc_id, qos_index,
+                                       0, 0);
+                       if (ret) {
+                               DPAA2_PMD_ERR("Qos entry update failed.");
+                               return -1;
+                       }
                }
 
                if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
@@ -2781,6 +3122,7 @@ dpaa2_flow_entry_update(
                        continue;
                }
 
+               dpaa2_flow_fs_entry_log("Before update", curr);
                extend = -1;
 
                ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
@@ -2872,10 +3214,13 @@ dpaa2_flow_entry_update(
                }
 
                if (extend >= 0)
-                       curr->fs_rule.key_size += extend;
+                       curr->fs_real_key_size += extend;
+               curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
+
+               dpaa2_flow_fs_entry_log("Start update", curr);
 
                ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
-                               priv->token, curr->tc_id, curr->fs_index,
+                               priv->token, curr->tc_id, curr->tc_index,
                                &curr->fs_rule, &curr->action_cfg);
                if (ret) {
                        DPAA2_PMD_ERR("FS entry update failed.");
@@ -2888,6 +3233,89 @@ dpaa2_flow_entry_update(
        return 0;
 }
 
+static inline int
+dpaa2_flow_verify_attr(
+       struct dpaa2_dev_priv *priv,
+       const struct rte_flow_attr *attr)
+{
+       struct rte_flow *curr = LIST_FIRST(&priv->flows);
+
+       while (curr) {
+               if (curr->tc_id == attr->group &&
+                       curr->tc_index == attr->priority) {
+                       DPAA2_PMD_ERR(
+                               "Flow with group %d and priority %d already exists.",
+                               attr->group, attr->priority);
+
+                       return -1;
+               }
+               curr = LIST_NEXT(curr, next);
+       }
+
+       return 0;
+}
+
+static inline int
+dpaa2_flow_verify_action(
+       struct dpaa2_dev_priv *priv,
+       const struct rte_flow_attr *attr,
+       const struct rte_flow_action actions[])
+{
+       int end_of_list = 0, i, j = 0;
+       const struct rte_flow_action_queue *dest_queue;
+       const struct rte_flow_action_rss *rss_conf;
+       struct dpaa2_queue *rxq;
+
+       while (!end_of_list) {
+               switch (actions[j].type) {
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       dest_queue = (const struct rte_flow_action_queue *)
+                                       (actions[j].conf);
+                       rxq = priv->rx_vq[dest_queue->index];
+                       if (attr->group != rxq->tc_index) {
+                               DPAA2_PMD_ERR(
+                                       "RXQ[%d] does not belong to group %d",
+                                       dest_queue->index, attr->group);
+
+                               return -1;
+                       }
+                       break;
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       rss_conf = (const struct rte_flow_action_rss *)
+                                       (actions[j].conf);
+                       if (rss_conf->queue_num > priv->dist_queues) {
+                               DPAA2_PMD_ERR(
+                                       "RSS queue number exceeds the distribution size");
+                               return -ENOTSUP;
+                       }
+                       for (i = 0; i < (int)rss_conf->queue_num; i++) {
+                               if (rss_conf->queue[i] >= priv->nb_rx_queues) {
+                                       DPAA2_PMD_ERR(
+                                               "RSS queue index exceeds the number of RXQs");
+                                       return -ENOTSUP;
+                               }
+                               rxq = priv->rx_vq[rss_conf->queue[i]];
+                               if (rxq->tc_index != attr->group) {
+                                       DPAA2_PMD_ERR(
+                                               "Queue/Group combination is not supported\n");
+                                       return -ENOTSUP;
+                               }
+                       }
+
+                       break;
+               case RTE_FLOW_ACTION_TYPE_END:
+                       end_of_list = 1;
+                       break;
+               default:
+                       DPAA2_PMD_ERR("Invalid action type");
+                       return -ENOTSUP;
+               }
+               j++;
+       }
+
+       return 0;
+}
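
As an illustration of what passes this check, a QUEUE action verifies only when the destination Rx queue sits in the traffic class named by attr->group; a sketch with queue index 0 and group 0, both chosen arbitrarily:

	/* Illustrative only: a group-0 flow steered to Rx queue 0, which must
	 * itself belong to traffic class 0 for dpaa2_flow_verify_action().
	 */
	const struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};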
+
 static int
 dpaa2_generic_flow_set(struct rte_flow *flow,
                       struct rte_eth_dev *dev,
@@ -2898,17 +3326,25 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 {
        const struct rte_flow_action_queue *dest_queue;
        const struct rte_flow_action_rss *rss_conf;
-       uint16_t index;
        int is_keycfg_configured = 0, end_of_list = 0;
        int ret = 0, i = 0, j = 0;
-       struct dpni_attr nic_attr;
-       struct dpni_rx_tc_dist_cfg tc_cfg;
+       struct dpni_rx_dist_cfg tc_cfg;
        struct dpni_qos_tbl_cfg qos_cfg;
        struct dpni_fs_action_cfg action;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct dpaa2_queue *rxq;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        size_t param;
        struct rte_flow *curr = LIST_FIRST(&priv->flows);
+       uint16_t qos_index;
+
+       ret = dpaa2_flow_verify_attr(priv, attr);
+       if (ret)
+               return ret;
+
+       ret = dpaa2_flow_verify_action(priv, attr, actions);
+       if (ret)
+               return ret;
 
        /* Parse pattern list to get the matching parameters */
        while (!end_of_list) {
@@ -2986,6 +3422,16 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                return ret;
                        }
                        break;
+               case RTE_FLOW_ITEM_TYPE_RAW:
+                       ret = dpaa2_configure_flow_raw(flow,
+                                                      dev, attr, &pattern[i],
+                                                      actions, error,
+                                                      &is_keycfg_configured);
+                       if (ret) {
+                               DPAA2_PMD_ERR("RAW flow configuration failed!");
+                               return ret;
+                       }
+                       break;
                case RTE_FLOW_ITEM_TYPE_END:
                        end_of_list = 1;
                        break; /*End of List*/
@@ -3002,33 +3448,16 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
        while (!end_of_list) {
                switch (actions[j].type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
-                       dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
-                       flow->flow_id = dest_queue->index;
+                       dest_queue =
+                               (const struct rte_flow_action_queue *)(actions[j].conf);
+                       rxq = priv->rx_vq[dest_queue->index];
                        flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
                        memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
-                       action.flow_id = flow->flow_id;
-                       if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-                               if (dpkg_prepare_key_cfg(&priv->extract.qos_key_extract.dpkg,
-                                       (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
-                                       DPAA2_PMD_ERR(
-                                       "Unable to prepare extract parameters");
-                                       return -1;
-                               }
+                       action.flow_id = rxq->flow_id;
 
-                               memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
-                               qos_cfg.discard_on_miss = true;
-                               qos_cfg.keep_entries = true;
-                               qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
-                               ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
-                                               priv->token, &qos_cfg);
-                               if (ret < 0) {
-                                       DPAA2_PMD_ERR(
-                                       "Distribution cannot be configured.(%d)"
-                                       , ret);
-                                       return -1;
-                               }
-                       }
+                       /* Configure FS table first */
                        if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
+                               dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
                                if (dpkg_prepare_key_cfg(
                                &priv->extract.tc_key_extract[flow->tc_id].dpkg,
                                (uint8_t *)(size_t)priv->extract
@@ -3038,115 +3467,134 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                        return -1;
                                }
 
-                               memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+                               memset(&tc_cfg, 0,
+                                       sizeof(struct dpni_rx_dist_cfg));
                                tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
-                               tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
                                tc_cfg.key_cfg_iova =
                                        (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
-                               tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
-                               tc_cfg.fs_cfg.keep_entries = true;
-                               ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
-                                                        priv->token,
-                                                        flow->tc_id, &tc_cfg);
+                               tc_cfg.tc = flow->tc_id;
+                               tc_cfg.enable = false;
+                               ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
+                                               priv->token, &tc_cfg);
                                if (ret < 0) {
                                        DPAA2_PMD_ERR(
-                                       "Distribution cannot be configured.(%d)"
-                                       , ret);
+                                               "TC hash cannot be disabled.(%d)",
+                                               ret);
+                                       return -1;
+                               }
+                               tc_cfg.enable = true;
+                               tc_cfg.fs_miss_flow_id =
+                                       dpaa2_flow_miss_flow_id;
+                               ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
+                                                        priv->token, &tc_cfg);
+                               if (ret < 0) {
+                                       DPAA2_PMD_ERR(
+                                               "TC distribution cannot be configured.(%d)",
+                                               ret);
                                        return -1;
                                }
-                       }
-                       /* Configure QoS table first */
-                       memset(&nic_attr, 0, sizeof(struct dpni_attr));
-                       ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
-                                                priv->token, &nic_attr);
-                       if (ret < 0) {
-                               DPAA2_PMD_ERR(
-                               "Failure to get attribute. dpni@%p err code(%d)\n",
-                               dpni, ret);
-                               return ret;
                        }
 
-                       action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
+                       /* Then configure the QoS table. */
+                       if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+                               dpaa2_flow_qos_table_extracts_log(priv);
+                               if (dpkg_prepare_key_cfg(
+                                       &priv->extract.qos_key_extract.dpkg,
+                                       (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
+                                       DPAA2_PMD_ERR(
+                                               "Unable to prepare extract parameters");
+                                       return -1;
+                               }
 
-                       if (!priv->qos_index) {
-                               priv->qos_index = rte_zmalloc(0,
-                                               nic_attr.qos_entries, 64);
-                       }
-                       for (index = 0; index < nic_attr.qos_entries; index++) {
-                               if (!priv->qos_index[index]) {
-                                       priv->qos_index[index] = 1;
-                                       break;
+                               memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
+                               qos_cfg.discard_on_miss = false;
+                               qos_cfg.default_tc = 0;
+                               qos_cfg.keep_entries = true;
+                               qos_cfg.key_cfg_iova =
+                                       (size_t)priv->extract.qos_extract_param;
+                               /* QoS table is effective only for multiple TCs. */
+                               if (priv->num_rx_tc > 1) {
+                                       ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+                                               priv->token, &qos_cfg);
+                                       if (ret < 0) {
+                                               DPAA2_PMD_ERR(
+                                               "RSS QoS table cannot be configured(%d)\n",
+                                                       ret);
+                                               return -1;
+                                       }
                                }
                        }
-                       if (index >= nic_attr.qos_entries) {
-                               DPAA2_PMD_ERR("QoS table with %d entries full",
-                                       nic_attr.qos_entries);
-                               return -1;
-                       }
-                       flow->qos_rule.key_size = priv->extract
+
+                       flow->qos_real_key_size = priv->extract
                                .qos_key_extract.key_info.key_total_size;
                        if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
                                if (flow->ipaddr_rule.qos_ipdst_offset >=
                                        flow->ipaddr_rule.qos_ipsrc_offset) {
-                                       flow->qos_rule.key_size =
+                                       flow->qos_real_key_size =
                                                flow->ipaddr_rule.qos_ipdst_offset +
                                                NH_FLD_IPV4_ADDR_SIZE;
                                } else {
-                                       flow->qos_rule.key_size =
+                                       flow->qos_real_key_size =
                                                flow->ipaddr_rule.qos_ipsrc_offset +
                                                NH_FLD_IPV4_ADDR_SIZE;
                                }
-                       } else if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV6_ADDR) {
+                       } else if (flow->ipaddr_rule.ipaddr_type ==
+                               FLOW_IPV6_ADDR) {
                                if (flow->ipaddr_rule.qos_ipdst_offset >=
                                        flow->ipaddr_rule.qos_ipsrc_offset) {
-                                       flow->qos_rule.key_size =
+                                       flow->qos_real_key_size =
                                                flow->ipaddr_rule.qos_ipdst_offset +
                                                NH_FLD_IPV6_ADDR_SIZE;
                                } else {
-                                       flow->qos_rule.key_size =
+                                       flow->qos_real_key_size =
                                                flow->ipaddr_rule.qos_ipsrc_offset +
                                                NH_FLD_IPV6_ADDR_SIZE;
                                }
                        }
-                       ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
+
+                       /* QoS entry added is only effective for multiple TCs. */
+                       if (priv->num_rx_tc > 1) {
+                               qos_index = flow->tc_id * priv->fs_entries +
+                                       flow->tc_index;
+                               if (qos_index >= priv->qos_entries) {
+                                       DPAA2_PMD_ERR("QoS table with %d entries full",
+                                               priv->qos_entries);
+                                       return -1;
+                               }
+                               flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
+
+                               dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
+
+                               ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
                                                priv->token, &flow->qos_rule,
-                                               flow->tc_id, index,
+                                               flow->tc_id, qos_index,
                                                0, 0);
-                       if (ret < 0) {
-                               DPAA2_PMD_ERR(
-                               "Error in addnig entry to QoS table(%d)", ret);
-                               priv->qos_index[index] = 0;
-                               return ret;
-                       }
-                       flow->qos_index = index;
-
-                       /* Then Configure FS table */
-                       if (!priv->fs_index) {
-                               priv->fs_index = rte_zmalloc(0,
-                                                               nic_attr.fs_entries, 64);
-                       }
-                       for (index = 0; index < nic_attr.fs_entries; index++) {
-                               if (!priv->fs_index[index]) {
-                                       priv->fs_index[index] = 1;
-                                       break;
+                               if (ret < 0) {
+                                       DPAA2_PMD_ERR(
+                                               "Error in adding entry to QoS table(%d)", ret);
+                                       return ret;
                                }
                        }
-                       if (index >= nic_attr.fs_entries) {
+
+                       if (flow->tc_index >= priv->fs_entries) {
                                DPAA2_PMD_ERR("FS table with %d entries full",
-                                       nic_attr.fs_entries);
+                                       priv->fs_entries);
                                return -1;
                        }
-                       flow->fs_rule.key_size = priv->extract
-                                       .tc_key_extract[attr->group].key_info.key_total_size;
+
+                       flow->fs_real_key_size =
+                               priv->extract.tc_key_extract[flow->tc_id]
+                               .key_info.key_total_size;
+
                        if (flow->ipaddr_rule.ipaddr_type ==
                                FLOW_IPV4_ADDR) {
                                if (flow->ipaddr_rule.fs_ipdst_offset >=
                                        flow->ipaddr_rule.fs_ipsrc_offset) {
-                                       flow->fs_rule.key_size =
+                                       flow->fs_real_key_size =
                                                flow->ipaddr_rule.fs_ipdst_offset +
                                                NH_FLD_IPV4_ADDR_SIZE;
                                } else {
-                                       flow->fs_rule.key_size =
+                                       flow->fs_real_key_size =
                                                flow->ipaddr_rule.fs_ipsrc_offset +
                                                NH_FLD_IPV4_ADDR_SIZE;
                                }
@@ -3154,46 +3602,33 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                FLOW_IPV6_ADDR) {
                                if (flow->ipaddr_rule.fs_ipdst_offset >=
                                        flow->ipaddr_rule.fs_ipsrc_offset) {
-                                       flow->fs_rule.key_size =
+                                       flow->fs_real_key_size =
                                                flow->ipaddr_rule.fs_ipdst_offset +
                                                NH_FLD_IPV6_ADDR_SIZE;
                                } else {
-                                       flow->fs_rule.key_size =
+                                       flow->fs_real_key_size =
                                                flow->ipaddr_rule.fs_ipsrc_offset +
                                                NH_FLD_IPV6_ADDR_SIZE;
                                }
                        }
+
+                       flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
+
+                       dpaa2_flow_fs_entry_log("Start add", flow);
+
                        ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
-                                               flow->tc_id, index,
+                                               flow->tc_id, flow->tc_index,
                                                &flow->fs_rule, &action);
                        if (ret < 0) {
                                DPAA2_PMD_ERR(
                                "Error in adding entry to FS table(%d)", ret);
-                               priv->fs_index[index] = 0;
                                return ret;
                        }
-                       flow->fs_index = index;
                        memcpy(&flow->action_cfg, &action,
                                sizeof(struct dpni_fs_action_cfg));
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
-                       ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
-                                                priv->token, &nic_attr);
-                       if (ret < 0) {
-                               DPAA2_PMD_ERR(
-                               "Failure to get attribute. dpni@%p err code(%d)\n",
-                               dpni, ret);
-                               return ret;
-                       }
                        rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
-                       for (i = 0; i < (int)rss_conf->queue_num; i++) {
-                               if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
-                                   rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
-                                       DPAA2_PMD_ERR(
-                                       "Queue/Group combination are not supported\n");
-                                       return -ENOTSUP;
-                               }
-                       }
 
                        flow->action = RTE_FLOW_ACTION_TYPE_RSS;
                        ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
@@ -3220,18 +3655,17 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                return -1;
                        }
 
-                       memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+                       memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
                        tc_cfg.dist_size = rss_conf->queue_num;
-                       tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
                        tc_cfg.key_cfg_iova = (size_t)param;
-                       tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
-
-                       ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
-                                                priv->token, flow->tc_id,
-                                                &tc_cfg);
+                       tc_cfg.enable = true;
+                       tc_cfg.tc = flow->tc_id;
+                       ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
+                                                priv->token, &tc_cfg);
                        if (ret < 0) {
                                DPAA2_PMD_ERR(
-                               "Distribution cannot be configured: %d\n", ret);
+                                       "RSS TC table cannot be configured: %d\n",
+                                       ret);
                                rte_free((void *)param);
                                return -1;
                        }
@@ -3255,41 +3689,33 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
                                                         priv->token, &qos_cfg);
                                if (ret < 0) {
                                        DPAA2_PMD_ERR(
-                                       "Distribution can't be configured %d\n",
+                                       "RSS QoS dist cannot be configured: %d\n",
                                        ret);
                                        return -1;
                                }
                        }
 
                        /* Add Rule into QoS table */
-                       if (!priv->qos_index) {
-                               priv->qos_index = rte_zmalloc(0,
-                                               nic_attr.qos_entries, 64);
-                       }
-                       for (index = 0; index < nic_attr.qos_entries; index++) {
-                               if (!priv->qos_index[index]) {
-                                       priv->qos_index[index] = 1;
-                                       break;
-                               }
-                       }
-                       if (index >= nic_attr.qos_entries) {
+                       qos_index = flow->tc_id * priv->fs_entries +
+                               flow->tc_index;
+                       if (qos_index >= priv->qos_entries) {
                                DPAA2_PMD_ERR("QoS table with %d entries full",
-                                       nic_attr.qos_entries);
+                                       priv->qos_entries);
                                return -1;
                        }
-                       flow->qos_rule.key_size =
+
+                       flow->qos_real_key_size =
                          priv->extract.qos_key_extract.key_info.key_total_size;
+                       flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
                        ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
                                                &flow->qos_rule, flow->tc_id,
-                                               index, 0, 0);
+                                               qos_index, 0, 0);
                        if (ret < 0) {
                                DPAA2_PMD_ERR(
                                "Error in entry addition in QoS table(%d)",
                                ret);
-                               priv->qos_index[index] = 0;
                                return ret;
                        }
-                       flow->qos_index = index;
                        break;
                case RTE_FLOW_ACTION_TYPE_END:
                        end_of_list = 1;
@@ -3303,11 +3729,15 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
        }
 
        if (!ret) {
-               ret = dpaa2_flow_entry_update(priv, flow->tc_id);
-               if (ret) {
-                       DPAA2_PMD_ERR("Flow entry update failed.");
+               if (is_keycfg_configured &
+                       (DPAA2_QOS_TABLE_RECONFIGURE |
+                       DPAA2_FS_TABLE_RECONFIGURE)) {
+                       ret = dpaa2_flow_entry_update(priv, flow->tc_id);
+                       if (ret) {
+                               DPAA2_PMD_ERR("Flow entry update failed.");
 
-                       return -1;
+                               return -1;
+                       }
                }
                /* New rules are inserted. */
                if (!curr) {
@@ -3473,6 +3903,23 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
        size_t key_iova = 0, mask_iova = 0;
        int ret;
 
+       dpaa2_flow_control_log =
+               getenv("DPAA2_FLOW_CONTROL_LOG");
+
+       if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
+               struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+               dpaa2_flow_miss_flow_id =
+                       atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
+               if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
+                       DPAA2_PMD_ERR(
+                               "The miss flow ID %d exceeds the max flow ID %d",
+                               dpaa2_flow_miss_flow_id,
+                               priv->dist_queues - 1);
+                       return NULL;
+               }
+       }
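
Both knobs above are plain environment variables read at flow-creation time, so an application or wrapper can set them before the first rte_flow_create() call; a minimal sketch (the value 1 is an arbitrary flow ID that must stay below priv->dist_queues):

	/* Illustrative only: enable flow-control logging and send missed
	 * traffic to flow ID 1 instead of the default DPNI_FS_MISS_DROP.
	 */
	setenv("DPAA2_FLOW_CONTROL_LOG", "1", 1);
	setenv("DPAA2_FLOW_CONTROL_MISS_FLOW", "1", 1);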
+
        flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
        if (!flow) {
                DPAA2_PMD_ERR("Failure to allocate memory for flow");
@@ -3566,33 +4013,35 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev,
 
        switch (flow->action) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
-               /* Remove entry from QoS table first */
-               ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
-                                          &flow->qos_rule);
-               if (ret < 0) {
-                       DPAA2_PMD_ERR(
-                               "Error in adding entry to QoS table(%d)", ret);
-                       goto error;
+               if (priv->num_rx_tc > 1) {
+                       /* Remove entry from QoS table first */
+                       ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+                                       &flow->qos_rule);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                                       "Error in removing entry from QoS table(%d)", ret);
+                               goto error;
+                       }
                }
-               priv->qos_index[flow->qos_index] = 0;
 
                /* Then remove entry from FS table */
                ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
                                           flow->tc_id, &flow->fs_rule);
                if (ret < 0) {
                        DPAA2_PMD_ERR(
-                               "Error in entry addition in FS table(%d)", ret);
+                               "Error in removing entry from FS table(%d)", ret);
                        goto error;
                }
-               priv->fs_index[flow->fs_index] = 0;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
-               ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
-                                          &flow->qos_rule);
-               if (ret < 0) {
-                       DPAA2_PMD_ERR(
-                       "Error in entry addition in QoS table(%d)", ret);
-                       goto error;
+               if (priv->num_rx_tc > 1) {
+                       ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
+                                       &flow->qos_rule);
+                       if (ret < 0) {
+                               DPAA2_PMD_ERR(
+                                       "Error in removing entry from QoS table(%d)", ret);
+                               goto error;
+                       }
                }
                break;
        default:
@@ -3603,6 +4052,10 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev,
        }
 
        LIST_REMOVE(flow, next);
+       rte_free((void *)(size_t)flow->qos_rule.key_iova);
+       rte_free((void *)(size_t)flow->qos_rule.mask_iova);
+       rte_free((void *)(size_t)flow->fs_rule.key_iova);
+       rte_free((void *)(size_t)flow->fs_rule.mask_iova);
        /* Now free the flow */
        rte_free(flow);