net/hns3: fix flow director lock
[dpdk.git] / drivers / net / hns3 / hns3_flow.c
index 06fabcd..4511a49 100644 (file)
@@ -1,9 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
  */
 
-#include <stdbool.h>
-#include <sys/queue.h>
 #include <rte_flow_driver.h>
 #include <rte_io.h>
 #include <rte_malloc.h>
@@ -11,6 +9,15 @@
 #include "hns3_ethdev.h"
 #include "hns3_logs.h"
 
+/* Default hash key */
+static uint8_t hns3_hash_key[] = {
+       0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+       0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+       0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+       0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+       0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
 
@@ -37,8 +44,7 @@ static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
-       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
-       RTE_FLOW_ITEM_TYPE_MPLS
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE
 };
 
 static enum rte_flow_item_type L2_next_items[] = {
@@ -58,8 +64,7 @@ static enum rte_flow_item_type L3_next_items[] = {
 static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
-       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
-       RTE_FLOW_ITEM_TYPE_MPLS
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE
 };
 
 static enum rte_flow_item_type tunnel_next_items[] = {
@@ -81,6 +86,58 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
                dst[i] = rte_be_to_cpu_32(src[i]);
 }
 
+/*
+ * This function is used to find the RSS general action.
+ * 1. RSS is used to spread packets among several queues. The flow API
+ *    provides struct rte_flow_action_rss, and the user can configure its
+ *    fields, such as func/level/types/key/queue, to control the RSS
+ *    function.
+ * 2. The flow API also supports queue region configuration for hns3. It is
+ *    implemented by FDIR + RSS in hns3 hardware: the user can create one
+ *    FDIR rule whose action is an RSS queue region.
+ * 3. When the action is RSS, the following rule is used to distinguish:
+ *    Case 1: the pattern has ETH and the action's queue_num > 0, which
+ *            indicates a queue region configuration.
+ *    Other cases: a general RSS action.
+ */
+static const struct rte_flow_action *
+hns3_find_rss_general_action(const struct rte_flow_item pattern[],
+                            const struct rte_flow_action actions[])
+{
+       const struct rte_flow_action *act = NULL;
+       const struct hns3_rss_conf *rss;
+       bool have_eth = false;
+
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+                       act = actions;
+                       break;
+               }
+       }
+       if (!act)
+               return NULL;
+
+       for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+               if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
+                       have_eth = true;
+                       break;
+               }
+       }
+
+       rss = act->conf;
+       if (have_eth && rss->conf.queue_num) {
+               /*
+                * The pattern has ETH and the action's queue_num > 0,
+                * indicating a queue region configuration.
+                * Because a queue region is implemented by FDIR + RSS in hns3
+                * hardware, it must go through the FDIR process, so return
+                * NULL here to avoid entering the RSS process.
+                */
+               return NULL;
+       }
+
+       return act;
+}
+
 static inline struct hns3_flow_counter *
 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
 {
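For reference, a minimal sketch of the two action layouts that hns3_find_rss_general_action() distinguishes. This is not part of the patch; the queue indices and RSS types are illustrative assumptions.

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Case 1: pattern has ETH and queue_num > 0 -> queue region (FDIR + RSS). */
    static const uint16_t region_queues[] = { 0, 1, 2, 3 };
    static const struct rte_flow_item region_pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    static const struct rte_flow_action_rss region_conf = {
            .queue_num = 4,              /* > 0: handled by the FDIR process */
            .queue = region_queues,
    };

    /* Other case: pattern without ETH, or queue_num == 0 -> general RSS action. */
    static const struct rte_flow_action_rss general_conf = {
            .types = ETH_RSS_IP,         /* hash tuple selection only */
            .queue_num = 0,
    };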
@@ -101,23 +158,33 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 {
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
+       struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;
+       uint64_t value;
+       int ret;
 
        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
                if (!cnt->shared || cnt->shared != shared)
                        return rte_flow_error_set(error, ENOTSUP,
-                                                 RTE_FLOW_ERROR_TYPE_ACTION,
-                                                 cnt,
-                                                 "Counter id is used,shared flag not match");
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                               cnt,
+                               "Counter id is used, shared flag not match");
                cnt->ref_cnt++;
                return 0;
        }
 
+       /* Clear the counter by read ops because the counter is read-clear */
+       ret = hns3_get_count(hw, id, &value);
+       if (ret)
+               return rte_flow_error_set(error, EIO,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         "Clear counter failed!");
+
        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
-                                         RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
                                          "Alloc mem for counter failed");
        cnt->id = id;
        cnt->shared = shared;
@@ -145,13 +212,12 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
        cnt = hns3_counter_lookup(dev, flow->counter_id);
        if (cnt == NULL)
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Can't find counter id");
 
        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
-               rte_flow_error_set(error, -ret,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+               rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter fail.");
                return ret;
        }
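Since the hardware counter is read-clear (hence the pre-read added in hns3_counter_new() above), an application reads it through the generic flow query API, which lands in hns3_counter_query(). A minimal usage sketch, not part of the patch; port_id and flow are assumed to come from the application.

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    static int
    query_flow_hits(uint16_t port_id, struct rte_flow *flow)
    {
            struct rte_flow_query_count stats = { .reset = 0 };
            const struct rte_flow_action count_action = {
                    .type = RTE_FLOW_ACTION_TYPE_COUNT,
            };
            struct rte_flow_error err;
            int ret;

            ret = rte_flow_query(port_id, flow, &count_action, &stats, &err);
            if (ret == 0 && stats.hits_set)
                    printf("flow hits: %" PRIu64 "\n", stats.hits);
            return ret;
    }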
@@ -203,15 +269,62 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
                         struct rte_flow_error *error)
 {
        struct hns3_adapter *hns = dev->data->dev_private;
-       struct hns3_hw *hw = &hns->hw;
        const struct rte_flow_action_queue *queue;
+       struct hns3_hw *hw = &hns->hw;
 
        queue = (const struct rte_flow_action_queue *)action->conf;
-       if (queue->index >= hw->data->nb_rx_queues)
+       if (queue->index >= hw->used_rx_queues) {
+               hns3_err(hw, "queue ID(%u) is greater than number of "
+                         "available queue (%u) in driver.",
+                         queue->index, hw->used_rx_queues);
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
-                                         "Invalid queue ID in PF");
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         action, "Invalid queue ID in PF");
+       }
+
        rule->queue_id = queue->index;
+       rule->nb_queues = 1;
+       rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
+       return 0;
+}
+
+static int
+hns3_handle_action_queue_region(struct rte_eth_dev *dev,
+                               const struct rte_flow_action *action,
+                               struct hns3_fdir_rule *rule,
+                               struct rte_flow_error *error)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
+       const struct rte_flow_action_rss *conf = action->conf;
+       struct hns3_hw *hw = &hns->hw;
+       uint16_t idx;
+
+       if (!hns3_dev_fd_queue_region_supported(hw))
+               return rte_flow_error_set(error, ENOTSUP,
+                       RTE_FLOW_ERROR_TYPE_ACTION, action,
+                       "Not support config queue region!");
+
+       if ((!rte_is_power_of_2(conf->queue_num)) ||
+               conf->queue_num > hw->rss_size_max ||
+               conf->queue[0] >= hw->used_rx_queues ||
+               conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
+                       "Invalid start queue ID and queue num! the start queue "
+                       "ID must valid, the queue num must be power of 2 and "
+                       "<= rss_size_max.");
+       }
+
+       for (idx = 1; idx < conf->queue_num; idx++) {
+               if (conf->queue[idx] != conf->queue[idx - 1] + 1)
+                       return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
+                               "Invalid queue ID sequence! the queue ID "
+                               "must be continuous increment.");
+       }
+
+       rule->queue_id = conf->queue[0];
+       rule->nb_queues = conf->queue_num;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
 }
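To illustrate the checks above: a queue region must use a power-of-two number of consecutive queue IDs that fit within the queues in use. A hypothetical configuration that would pass, assuming at least 16 Rx queues are in use and rss_size_max >= 8.

    /* 8 consecutive queues starting at 8: power of 2, each ID is previous + 1. */
    static const uint16_t region_q[] = { 8, 9, 10, 11, 12, 13, 14, 15 };
    static const struct rte_flow_action_rss queue_region = {
            .queue_num = 8,
            .queue = region_q,
    };
    static const struct rte_flow_action region_actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &queue_region },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };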
@@ -248,14 +361,27 @@ hns3_handle_actions(struct rte_eth_dev *dev,
                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule->action = HNS3_FD_ACTION_DROP_PACKET;
                        break;
+               /*
+                * Here the real action of RSS is a queue region.
+                * A queue region is implemented by FDIR + RSS in hns3
+                * hardware: the FDIR action is one queue region
+                * (start_queue_id and queue_num), and RSS then spreads
+                * packets over that queue region by the RSS algorithm.
+                */
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       ret = hns3_handle_action_queue_region(dev, actions,
+                                                             rule, error);
+                       if (ret)
+                               return ret;
+                       break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark =
                            (const struct rte_flow_action_mark *)actions->conf;
                        if (mark->id >= HNS3_MAX_FILTER_ID)
                                return rte_flow_error_set(error, EINVAL,
-                                                    RTE_FLOW_ERROR_TYPE_ACTION,
-                                                    actions,
-                                                    "Invalid Mark ID");
+                                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                               actions,
+                                               "Invalid Mark ID");
                        rule->fd_id = mark->id;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
@@ -269,9 +395,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
                        counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
                        if (act_count->id >= counter_num)
                                return rte_flow_error_set(error, EINVAL,
-                                                    RTE_FLOW_ERROR_TYPE_ACTION,
-                                                    actions,
-                                                    "Invalid counter id");
+                                               RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                               actions,
+                                               "Invalid counter id");
                        rule->act_cnt = *act_count;
                        rule->flags |= HNS3_RULE_FLAG_COUNTER;
                        break;
@@ -287,7 +413,6 @@ hns3_handle_actions(struct rte_eth_dev *dev,
        return 0;
 }
 
-/* Parse to get the attr and action info of flow director rule. */
 static int
 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
 {
@@ -315,17 +440,12 @@ hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
 }
 
 static int
-hns3_parse_eth(const struct rte_flow_item *item,
-                  struct hns3_fdir_rule *rule, struct rte_flow_error *error)
+hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+              struct rte_flow_error *error __rte_unused)
 {
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;
@@ -365,11 +485,6 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        rule->key_conf.vlan_num++;
        if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
                return rte_flow_error_set(error, EINVAL,
@@ -407,6 +522,17 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        return 0;
 }
 
+static bool
+hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+       if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
+           ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
+           ipv4_mask->hdr.hdr_checksum)
+               return false;
+
+       return true;
+}
+
 static int
 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
@@ -414,28 +540,19 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
+
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;
 
        if (item->mask) {
                ipv4_mask = item->mask;
-
-               if (ipv4_mask->hdr.total_length ||
-                   ipv4_mask->hdr.packet_id ||
-                   ipv4_mask->hdr.fragment_offset ||
-                   ipv4_mask->hdr.time_to_live ||
-                   ipv4_mask->hdr.hdr_checksum) {
+               if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
                        return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip,tos,proto in IPV4");
                }
@@ -482,11 +599,6 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
@@ -497,10 +609,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (item->mask) {
                ipv6_mask = item->mask;
-               if (ipv6_mask->hdr.vtc_flow ||
-                   ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
+               if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
+                   ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip,proto in IPV6");
                }
@@ -531,6 +643,18 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        return 0;
 }
 
+static bool
+hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
+{
+       if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
+           tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
+           tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
+           tcp_mask->hdr.tcp_urp)
+               return false;
+
+       return true;
+}
+
 static int
 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
@@ -538,11 +662,6 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_TCP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -553,14 +672,9 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (item->mask) {
                tcp_mask = item->mask;
-               if (tcp_mask->hdr.sent_seq ||
-                   tcp_mask->hdr.recv_ack ||
-                   tcp_mask->hdr.data_off ||
-                   tcp_mask->hdr.tcp_flags ||
-                   tcp_mask->hdr.rx_win ||
-                   tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
+               if (!hns3_check_tcp_mask_supported(tcp_mask)) {
                        return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in TCP");
                }
@@ -591,14 +705,10 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;
@@ -607,7 +717,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
                        return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in UDP");
                }
@@ -637,11 +747,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-
        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -654,10 +759,9 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                sctp_mask = item->mask;
                if (sctp_mask->hdr.cksum)
                        return rte_flow_error_set(error, EINVAL,
-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
-
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
@@ -686,7 +790,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 }
 
 /*
- * Check items before tunnel, save inner configs to outer configs,and clear
+ * Check items before tunnel, save inner configs to outer configs, and clear
  * inner configs.
  * The key consists of two parts: meta_data and tuple keys.
  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
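As a purely illustrative aid for the 15-bit meta_data described above (vlan_num takes 2 bits, des_port 12 bits, and the tunnel flag the remaining bit), a hypothetical packing helper; the field order and macro names are assumptions, not the driver's actual layout.

    #include <stdint.h>

    #define META_VLAN_NUM_S   0     /* 2 bits  */
    #define META_DST_PORT_S   2     /* 12 bits */
    #define META_TUNNEL_S     14    /* 1 bit   */

    static inline uint16_t
    pack_meta_data(uint8_t vlan_num, uint16_t dst_port, uint8_t tunnel)
    {
            return (uint16_t)(((vlan_num & 0x3) << META_VLAN_NUM_S) |
                              ((dst_port & 0xFFF) << META_DST_PORT_S) |
                              ((tunnel & 0x1) << META_TUNNEL_S));
    }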
@@ -774,15 +878,6 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-       else if (item->spec && (item->mask == NULL))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Tunnel packets must configure with mask");
-
        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -799,14 +894,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (vxlan_mask->flags)
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Flags is not supported in VxLAN");
 
        /* VNI must be totally masked or not. */
        if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in VxLAN");
        if (vxlan_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -825,15 +920,6 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-       else if (item->spec && (item->mask == NULL))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Tunnel packets must configure with mask");
-
        hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
        rule->key_conf.spec.outer_proto = IPPROTO_GRE;
        rule->key_conf.mask.outer_proto = IPPROTO_MASK;
@@ -850,14 +936,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocal is not supported in NVGRE");
 
        /* TNI must be totally masked or not. */
        if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "TNI must be totally masked or not in NVGRE");
 
        if (nvgre_mask->tni[0]) {
@@ -883,15 +969,6 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;
 
-       if (item->spec == NULL && item->mask)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Can't configure FDIR with mask but without spec");
-       else if (item->spec && (item->mask == NULL))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "Tunnel packets must configure with mask");
-
        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
@@ -904,13 +981,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
        if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocal is not supported in GENEVE");
        /* VNI must be totally masked or not. */
        if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in GENEVE");
        if (geneve_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -928,6 +1005,17 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 {
        int ret;
 
+       if (item->spec == NULL && item->mask)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "Can't configure FDIR with mask "
+                                         "but without spec");
+       else if (item->spec && (item->mask == NULL))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "Tunnel packets must configure "
+                                         "with mask");
+
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
@@ -941,7 +1029,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported tunnel type!");
        }
        if (ret)
@@ -950,13 +1038,18 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 }
 
 static int
-hns3_parse_normal(const struct rte_flow_item *item,
-                 struct hns3_fdir_rule *rule,
+hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
 {
        int ret;
 
+       if (item->spec == NULL && item->mask)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "Can't configure FDIR with mask "
+                                         "but without spec");
+
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
@@ -995,7 +1088,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_HANDLE,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported normal type!");
        }
 
@@ -1011,7 +1104,7 @@ hns3_validate_item(const struct rte_flow_item *item,
 
        if (item->last)
                return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                                          "Not supported last point for range");
 
        for (i = 0; i < step_mngr.count; i++) {
@@ -1033,8 +1126,7 @@ is_tunnel_packet(enum rte_flow_item_type type)
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
-           type == RTE_FLOW_ITEM_TYPE_GENEVE ||
-           type == RTE_FLOW_ITEM_TYPE_MPLS)
+           type == RTE_FLOW_ITEM_TYPE_GENEVE)
                return true;
        return false;
 }
@@ -1095,11 +1187,6 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir not supported in VF");
 
-       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
-                                         "fdir_conf.mode isn't perfect");
-
        step_mngr.items = first_items;
        step_mngr.count = ARRAY_SIZE(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
@@ -1127,11 +1214,21 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 void
-hns3_filterlist_init(struct rte_eth_dev *dev)
+hns3_flow_init(struct rte_eth_dev *dev)
 {
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_process_private *process_list = dev->process_private;
+       pthread_mutexattr_t attr;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               pthread_mutexattr_init(&attr);
+               pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+               pthread_mutex_init(&hw->flows_lock, &attr);
+               dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+       }
 
        TAILQ_INIT(&process_list->fdir_list);
+       TAILQ_INIT(&process_list->filter_rss_list);
        TAILQ_INIT(&process_list->flow_list);
 }
 
@@ -1140,6 +1237,7 @@ hns3_filterlist_flush(struct rte_eth_dev *dev)
 {
        struct hns3_process_private *process_list = dev->process_private;
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
+       struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;
 
        fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
@@ -1149,6 +1247,14 @@ hns3_filterlist_flush(struct rte_eth_dev *dev)
                fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
        }
 
+       rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+       while (rss_filter_ptr) {
+               TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+                            entries);
+               rte_free(rss_filter_ptr);
+               rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+       }
+
        flow_node = TAILQ_FIRST(&process_list->flow_list);
        while (flow_node) {
                TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
@@ -1158,6 +1264,431 @@ hns3_filterlist_flush(struct rte_eth_dev *dev)
        }
 }
 
+static bool
+hns3_action_rss_same(const struct rte_flow_action_rss *comp,
+                    const struct rte_flow_action_rss *with)
+{
+       bool func_is_same;
+
+       /*
+        * When the user flushes all RSS rules, the RSS func is set to the
+        * invalid value RTE_ETH_HASH_FUNCTION_MAX. If the user then creates
+        * a flow after the flush, any valid RSS func differs from the saved
+        * one. Otherwise, when the user creates an RSS action with the func
+        * specified as RTE_ETH_HASH_FUNCTION_DEFAULT, the func is considered
+        * the same between consecutive RSS flows.
+        */
+       if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
+               func_is_same = false;
+       else
+               func_is_same = with->func ? (comp->func == with->func) : true;
+
+       return (func_is_same &&
+               comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
+               comp->level == with->level && comp->key_len == with->key_len &&
+               comp->queue_num == with->queue_num &&
+               !memcmp(comp->key, with->key, with->key_len) &&
+               !memcmp(comp->queue, with->queue,
+                       sizeof(*with->queue) * with->queue_num));
+}
+
+static int
+hns3_rss_conf_copy(struct hns3_rss_conf *out,
+                  const struct rte_flow_action_rss *in)
+{
+       if (in->key_len > RTE_DIM(out->key) ||
+           in->queue_num > RTE_DIM(out->queue))
+               return -EINVAL;
+       if (in->key == NULL && in->key_len)
+               return -EINVAL;
+       out->conf = (struct rte_flow_action_rss) {
+               .func = in->func,
+               .level = in->level,
+               .types = in->types,
+               .key_len = in->key_len,
+               .queue_num = in->queue_num,
+       };
+       out->conf.queue = memcpy(out->queue, in->queue,
+                               sizeof(*in->queue) * in->queue_num);
+       if (in->key)
+               out->conf.key = memcpy(out->key, in->key, in->key_len);
+
+       return 0;
+}
+
+static bool
+hns3_rss_input_tuple_supported(struct hns3_hw *hw,
+                              const struct rte_flow_action_rss *rss)
+{
+       /*
+        * For IP packets, it is not supported to use the src/dst port fields
+        * for the RSS hash of the following packet types:
+        * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
+        * Besides, on Kunpeng920 the NIC hardware does not support using the
+        * src/dst port fields for the RSS hash of the IPv6 SCTP packet type.
+        * However, Kunpeng930 and later Kunpeng series support using the
+        * src/dst port fields for the RSS hash of the IPv6 SCTP packet type.
+        */
+       if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
+           (rss->types & ETH_RSS_IP ||
+           (!hw->rss_info.ipv6_sctp_offload_supported &&
+           rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+               return false;
+
+       return true;
+}
+
+/*
+ * This function is used to validate the RSS action.
+ */
+static int
+hns3_parse_rss_filter(struct rte_eth_dev *dev,
+                     const struct rte_flow_action *actions,
+                     struct rte_flow_error *error)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_hw *hw = &hns->hw;
+       struct hns3_rss_conf *rss_conf = &hw->rss_info;
+       const struct rte_flow_action_rss *rss;
+       const struct rte_flow_action *act;
+       uint32_t act_index = 0;
+       uint16_t n;
+
+       NEXT_ITEM_OF_ACTION(act, actions, act_index);
+       rss = act->conf;
+
+       if (rss == NULL) {
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         act, "no valid queues");
+       }
+
+       if (rss->queue_num > RTE_DIM(rss_conf->queue))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                                         "queue number configured exceeds "
+                                         "queue buffer size driver supported");
+
+       for (n = 0; n < rss->queue_num; n++) {
+               if (rss->queue[n] < hw->alloc_rss_size)
+                       continue;
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                                         "queue id must be less than queue number allocated to a TC");
+       }
+
+       if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         act,
+                                         "Flow types is unsupported by "
+                                         "hns3's RSS");
+       if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                                         "RSS hash func are not supported");
+       if (rss->level)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                                         "a nonzero RSS encapsulation level is not supported");
+       if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+                                         "RSS hash key must be exactly 40 bytes");
+
+       if (!hns3_rss_input_tuple_supported(hw, rss))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->types,
+                                         "input RSS types are not supported");
+
+       act_index++;
+
+       /* Check that the next non-void action is END */
+       NEXT_ITEM_OF_ACTION(act, actions, act_index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         act, "Not supported action.");
+       }
+
+       return 0;
+}
+
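An end-to-end sketch of an RSS rule that satisfies the validation above: a key of exactly 40 bytes, encapsulation level 0, only supported hash types, and queue IDs below the per-TC allocation. It is not part of the patch; the port ID, queue IDs and key contents are assumptions.

    #include <rte_errno.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static int
    create_rss_flow(uint16_t port_id)
    {
            static const uint8_t key[40] = { 0 };   /* app-chosen 40-byte key */
            static const uint16_t queues[] = { 0, 1, 2, 3 };
            const struct rte_flow_attr attr = { .ingress = 1 };
            const struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            const struct rte_flow_action_rss rss = {
                    .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                    .level = 0,                     /* nonzero levels are rejected */
                    .types = ETH_RSS_IP | ETH_RSS_TCP,
                    .key_len = sizeof(key),         /* must be exactly 40 bytes */
                    .key = key,
                    .queue_num = RTE_DIM(queues),
                    .queue = queues,
            };
            const struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            return rte_flow_create(port_id, &attr, pattern, actions, &err) != NULL ?
                   0 : -rte_errno;
    }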
+static int
+hns3_disable_rss(struct hns3_hw *hw)
+{
+       int ret;
+
+       /* Reset the redirection table to queue 0 */
+       ret = hns3_rss_reset_indir_table(hw);
+       if (ret)
+               return ret;
+
+       /* Disable RSS */
+       hw->rss_info.conf.types = 0;
+       hw->rss_dis_flag = true;
+
+       return 0;
+}
+
+static void
+hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
+{
+       if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
+               hns3_warn(hw, "Default RSS hash key to be set");
+               rss_conf->key = hns3_hash_key;
+               rss_conf->key_len = HNS3_RSS_KEY_SIZE;
+       }
+}
+
+static int
+hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
+                        uint8_t *hash_algo)
+{
+       enum rte_eth_hash_function algo_func = *func;
+       switch (algo_func) {
+       case RTE_ETH_HASH_FUNCTION_DEFAULT:
+               /* Keep *hash_algo as what it used to be */
+               algo_func = hw->rss_info.conf.func;
+               break;
+       case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+               *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
+               break;
+       case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
+               *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
+               break;
+       case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
+               *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
+               break;
+       default:
+               hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
+                        algo_func);
+               return -EINVAL;
+       }
+       *func = algo_func;
+
+       return 0;
+}
+
+static int
+hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
+{
+       struct hns3_rss_tuple_cfg *tuple;
+       int ret;
+
+       hns3_parse_rss_key(hw, rss_config);
+
+       ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
+                                      &hw->rss_info.hash_algo);
+       if (ret)
+               return ret;
+
+       ret = hns3_rss_set_algo_key(hw, rss_config->key);
+       if (ret)
+               return ret;
+
+       hw->rss_info.conf.func = rss_config->func;
+
+       tuple = &hw->rss_info.rss_tuple_sets;
+       ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
+       if (ret)
+               hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
+
+       return ret;
+}
+
+static int
+hns3_update_indir_table(struct rte_eth_dev *dev,
+                       const struct rte_flow_action_rss *conf, uint16_t num)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_hw *hw = &hns->hw;
+       uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
+       uint16_t j;
+       uint32_t i;
+
+       /* Fill in redirection table */
+       memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
+              sizeof(hw->rss_info.rss_indirection_tbl));
+       for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
+               j %= num;
+               if (conf->queue[j] >= hw->alloc_rss_size) {
+                       hns3_err(hw, "queue id(%u) set to redirection table "
+                                "exceeds queue number(%u) allocated to a TC.",
+                                conf->queue[j], hw->alloc_rss_size);
+                       return -EINVAL;
+               }
+               indir_tbl[i] = conf->queue[j];
+       }
+
+       return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
+}
+
+static int
+hns3_config_rss_filter(struct rte_eth_dev *dev,
+                      const struct hns3_rss_conf *conf, bool add)
+{
+       struct hns3_process_private *process_list = dev->process_private;
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_rss_conf_ele *rss_filter_ptr;
+       struct hns3_hw *hw = &hns->hw;
+       struct hns3_rss_conf *rss_info;
+       uint64_t flow_types;
+       uint16_t num;
+       int ret;
+
+       struct rte_flow_action_rss rss_flow_conf = {
+               .func = conf->conf.func,
+               .level = conf->conf.level,
+               .types = conf->conf.types,
+               .key_len = conf->conf.key_len,
+               .queue_num = conf->conf.queue_num,
+               .key = conf->conf.key_len ?
+                   (void *)(uintptr_t)conf->conf.key : NULL,
+               .queue = conf->conf.queue,
+       };
+
+       /* Filter the unsupported flow types */
+       flow_types = conf->conf.types ?
+                    rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
+                    hw->rss_info.conf.types;
+       if (flow_types != rss_flow_conf.types)
+               hns3_warn(hw, "modified RSS types based on hardware support, "
+                             "requested:%" PRIx64 " configured:%" PRIx64,
+                         rss_flow_conf.types, flow_types);
+       /* Update the useful flow types */
+       rss_flow_conf.types = flow_types;
+
+       rss_info = &hw->rss_info;
+       if (!add) {
+               if (!conf->valid)
+                       return 0;
+
+               ret = hns3_disable_rss(hw);
+               if (ret) {
+                       hns3_err(hw, "RSS disable failed(%d)", ret);
+                       return ret;
+               }
+
+               if (rss_flow_conf.queue_num) {
+                       /*
+                        * Since the content of the queue pointer has been
+                        * reset to 0, rss_info->conf.queue should be set to
+                        * NULL.
+                        */
+                       rss_info->conf.queue = NULL;
+                       rss_info->conf.queue_num = 0;
+               }
+
+               /* set RSS func invalid after flushed */
+               rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
+               return 0;
+       }
+
+       /* Set rx queues to use */
+       num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
+       if (rss_flow_conf.queue_num > num)
+               hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated",
+                         rss_flow_conf.queue_num);
+       hns3_info(hw, "Max of contiguous %u PF queues are configured", num);
+
+       rte_spinlock_lock(&hw->lock);
+       if (num) {
+               ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
+               if (ret)
+                       goto rss_config_err;
+       }
+
+       /* Set hash algorithm and flow types by the user's config */
+       ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
+       if (ret)
+               goto rss_config_err;
+
+       ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
+       if (ret) {
+               hns3_err(hw, "RSS config init fail(%d)", ret);
+               goto rss_config_err;
+       }
+
+       /*
+        * When a new RSS rule is created, the old rule is overridden and
+        * marked invalid.
+        */
+       TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
+               rss_filter_ptr->filter_info.valid = false;
+
+rss_config_err:
+       rte_spinlock_unlock(&hw->lock);
+
+       return ret;
+}
+
+static int
+hns3_clear_rss_filter(struct rte_eth_dev *dev)
+{
+       struct hns3_process_private *process_list = dev->process_private;
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_rss_conf_ele *rss_filter_ptr;
+       struct hns3_hw *hw = &hns->hw;
+       int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
+       int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
+       int ret = 0;
+
+       rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+       while (rss_filter_ptr) {
+               TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+                            entries);
+               ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+                                            false);
+               if (ret)
+                       rss_rule_fail_cnt++;
+               else
+                       rss_rule_succ_cnt++;
+               rte_free(rss_filter_ptr);
+               rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+       }
+
+       if (rss_rule_fail_cnt) {
+               hns3_err(hw, "fail to delete all RSS filters, success num = %d "
+                            "fail num = %d", rss_rule_succ_cnt,
+                            rss_rule_fail_cnt);
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+int
+hns3_restore_rss_filter(struct rte_eth_dev *dev)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_hw *hw = &hns->hw;
+
+       /* When the user flushes all rules, the RSS rule need not be restored */
+       if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
+               return 0;
+
+       return hns3_config_rss_filter(dev, &hw->rss_info, true);
+}
+
+static int
+hns3_flow_parse_rss(struct rte_eth_dev *dev,
+                   const struct hns3_rss_conf *conf, bool add)
+{
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_hw *hw = &hns->hw;
+       bool ret;
+
+       ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
+       if (ret) {
+               hns3_err(hw, "Enter duplicate RSS configuration : %d", ret);
+               return -EINVAL;
+       }
+
+       return hns3_config_rss_filter(dev, conf, add);
+}
+
 static int
 hns3_flow_args_check(const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
@@ -1200,6 +1731,9 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        if (ret)
                return ret;
 
+       if (hns3_find_rss_general_action(pattern, actions))
+               return hns3_parse_rss_filter(dev, actions, error);
+
        memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
        return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
 }
@@ -1207,7 +1741,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 /*
  * Create or destroy a flow rule.
  * Theoretically one rule can match more than one filter.
- * We will let it use the filter which it hitt first.
+ * We will let it use the filter which it hit first.
  * So, the sequence matters.
  */
 static struct rte_flow *
@@ -1219,29 +1753,30 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        struct hns3_process_private *process_list = dev->process_private;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
+       const struct hns3_rss_conf *rss_conf;
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
+       struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;
+       const struct rte_flow_action *act;
        struct rte_flow *flow;
        struct hns3_fdir_rule fdir_rule;
        int ret;
 
-       ret = hns3_flow_args_check(attr, pattern, actions, error);
+       ret = hns3_flow_validate(dev, attr, pattern, actions, error);
        if (ret)
                return NULL;
 
        flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
        if (flow == NULL) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                  "Failed to allocate flow memory");
+               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                  NULL, "Failed to allocate flow memory");
                return NULL;
        }
        flow_node = rte_zmalloc("hns3 flow node",
                                sizeof(struct hns3_flow_mem), 0);
        if (flow_node == NULL) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-                                  "Failed to allocate flow list memory");
+               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                  NULL, "Failed to allocate flow list memory");
                rte_free(flow);
                return NULL;
        }
@@ -1249,6 +1784,34 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        flow_node->flow = flow;
        TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
 
+       act = hns3_find_rss_general_action(pattern, actions);
+       if (act) {
+               rss_conf = act->conf;
+
+               ret = hns3_flow_parse_rss(dev, rss_conf, true);
+               if (ret)
+                       goto err;
+
+               rss_filter_ptr = rte_zmalloc("hns3 rss filter",
+                                            sizeof(struct hns3_rss_conf_ele),
+                                            0);
+               if (rss_filter_ptr == NULL) {
+                       hns3_err(hw,
+                                   "Failed to allocate hns3_rss_filter memory");
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
+                                  &rss_conf->conf);
+               rss_filter_ptr->filter_info.valid = true;
+               TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
+                                 rss_filter_ptr, entries);
+
+               flow->rule = rss_filter_ptr;
+               flow->filter_type = RTE_ETH_FILTER_HASH;
+               return flow;
+       }
+
        memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
        ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
        if (ret)
@@ -1262,16 +1825,18 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 
                flow->counter_id = fdir_rule.act_cnt.id;
        }
+
+       fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
+                                   sizeof(struct hns3_fdir_rule_ele),
+                                   0);
+       if (fdir_rule_ptr == NULL) {
+               hns3_err(hw, "failed to allocate fdir_rule memory.");
+               ret = -ENOMEM;
+               goto err_fdir;
+       }
+
        ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
        if (!ret) {
-               fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
-                                           sizeof(struct hns3_fdir_rule_ele),
-                                           0);
-               if (fdir_rule_ptr == NULL) {
-                       hns3_err(hw, "Failed to allocate fdir_rule memory");
-                       ret = -ENOMEM;
-                       goto err_fdir;
-               }
                memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
                        sizeof(struct hns3_fdir_rule));
                TAILQ_INSERT_TAIL(&process_list->fdir_list,
@@ -1282,10 +1847,11 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                return flow;
        }
 
+       rte_free(fdir_rule_ptr);
 err_fdir:
        if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
                hns3_counter_release(dev, fdir_rule.act_cnt.id);
-
+err:
        rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow");
 out:
@@ -1303,6 +1869,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
        struct hns3_process_private *process_list = dev->process_private;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
+       struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;
        enum rte_filter_type filter_type;
        struct hns3_fdir_rule fdir_rule;
@@ -1312,6 +1879,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "Flow is NULL");
+
        filter_type = flow->filter_type;
        switch (filter_type) {
        case RTE_ETH_FILTER_FDIR:
@@ -1331,6 +1899,20 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = NULL;
                break;
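+       /* For hash (RSS) rules, disable the filter before freeing the node. */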
+       case RTE_ETH_FILTER_HASH:
+               rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
+               ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+                                            false);
+               if (ret)
+                       return rte_flow_error_set(error, EIO,
+                                                 RTE_FLOW_ERROR_TYPE_HANDLE,
+                                                 flow,
+                                                 "Destroy RSS fail. Try again");
+               TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+                            entries);
+               rte_free(rss_filter_ptr);
+               rss_filter_ptr = NULL;
+               break;
        default:
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, flow,
@@ -1371,6 +1953,13 @@ hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
                hns3_counter_flush(dev);
        }
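+       /* Clear RSS filters configured through the flow API as well. */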
 
+       ret = hns3_clear_rss_filter(dev);
+       if (ret) {
+               rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                  NULL, "Failed to flush rss filter");
+               return ret;
+       }
+
        hns3_filterlist_flush(dev);
 
        return 0;
@@ -1382,9 +1971,15 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                const struct rte_flow_action *actions, void *data,
                struct rte_flow_error *error)
 {
+       struct rte_flow_action_rss *rss_conf;
+       struct hns3_rss_conf_ele *rss_rule;
        struct rte_flow_query_count *qc;
        int ret;
 
+       if (!flow->rule)
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
+
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1395,55 +1990,121 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                        if (ret)
                                return ret;
                        break;
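+               /* For hash rules, return a copy of the stored RSS configuration. */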
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       if (flow->filter_type != RTE_ETH_FILTER_HASH) {
+                               return rte_flow_error_set(error, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       actions, "action is not supported");
+                       }
+                       rss_conf = (struct rte_flow_action_rss *)data;
+                       rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
+                       rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
+                                  sizeof(struct rte_flow_action_rss));
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
-                                                 RTE_FLOW_ERROR_TYPE_ACTION,
-                                                 actions,
-                                                 "Query action only support count");
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               actions, "action is not supported");
                }
        }
+
        return 0;
 }
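+/*
+ * Wrappers around the rte_flow callbacks: each one takes hw->flows_lock
+ * around the real handler so that flow operations on the port are
+ * serialized.
+ */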
 
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+                     const struct rte_flow_item pattern[],
+                     const struct rte_flow_action actions[],
+                     struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_flow *flow;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       flow = hns3_flow_create(dev, attr, pattern, actions, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+                      struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_destroy(dev, flow, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_flush(dev, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+                    const struct rte_flow_action *actions, void *data,
+                    struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_query(dev, flow, actions, data, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
 static const struct rte_flow_ops hns3_flow_ops = {
-       .validate = hns3_flow_validate,
-       .create = hns3_flow_create,
-       .destroy = hns3_flow_destroy,
-       .flush = hns3_flow_flush,
-       .query = hns3_flow_query,
+       .validate = hns3_flow_validate_wrap,
+       .create = hns3_flow_create_wrap,
+       .destroy = hns3_flow_destroy_wrap,
+       .flush = hns3_flow_flush_wrap,
+       .query = hns3_flow_query_wrap,
        .isolate = NULL,
 };
 
-/*
- * The entry of flow API.
- * @param dev
- *   Pointer to Ethernet device.
- * @return
- *   0 on success, a negative errno value otherwise is set.
- */
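+/*
+ * Expose the flow API callbacks; reject the request if the port has
+ * already been closed.
+ */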
 int
-hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op, void *arg)
+hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
+                     const struct rte_flow_ops **ops)
 {
        struct hns3_hw *hw;
-       int ret = 0;
 
-       if (dev == NULL)
-               return -EINVAL;
        hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               if (hw->adapter_state >= HNS3_NIC_CLOSED)
-                       return -ENODEV;
-               *(const void **)arg = &hns3_flow_ops;
-               break;
-       default:
-               hns3_err(hw, "Filter type (%d) not supported", filter_type);
-               ret = -EOPNOTSUPP;
-               break;
-       }
+       if (hw->adapter_state >= HNS3_NIC_CLOSED)
+               return -ENODEV;
 
-       return ret;
+       *ops = &hns3_flow_ops;
+       return 0;
 }