net/hns3: fix queue flow action validation
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index f2bff1e..841e0b9 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
  */
 
 #include <rte_flow_driver.h>
@@ -44,8 +44,7 @@ static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
-       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
-       RTE_FLOW_ITEM_TYPE_MPLS
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE
 };
 
 static enum rte_flow_item_type L2_next_items[] = {
@@ -65,8 +64,7 @@ static enum rte_flow_item_type L3_next_items[] = {
 static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
-       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
-       RTE_FLOW_ITEM_TYPE_MPLS
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE
 };
 
 static enum rte_flow_item_type tunnel_next_items[] = {
@@ -160,7 +158,10 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 {
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
+       struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;
+       uint64_t value;
+       int ret;
 
        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
@@ -173,6 +174,13 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
                return 0;
        }
 
+       /* Clear the counter by read ops because the counter is read-clear */
+       ret = hns3_get_count(hw, id, &value);
+       if (ret)
+               return rte_flow_error_set(error, EIO,
+                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                         "Clear counter failed!");
+
        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
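
The new read is explained by the comment above it: the hardware counter is read-clear, so one throwaway hns3_get_count() call guarantees a freshly allocated counter starts from zero instead of inheriting whatever the previous user left behind. A minimal sketch of the idiom, with read_hw_counter() as a hypothetical stand-in for the register accessor:

    #include <stdint.h>

    /* Hypothetical read-clear accessor: returns the current value and
     * zeroes the hardware counter as a side effect of the read.
     */
    uint64_t read_hw_counter(uint32_t id);

    static void counter_alloc(uint32_t id)
    {
        /* Discard one read so the counter starts from a known zero. */
        (void)read_hw_counter(id);
    }
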
@@ -215,6 +223,8 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
        }
        qc->hits_set = 1;
        qc->hits = value;
+       qc->bytes_set = 0;
+       qc->bytes = 0;
 
        return 0;
 }
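
Zeroing bytes_set makes the driver's capabilities explicit to callers of rte_flow_query(): hns3 reports packet hits only, so applications must check hits_set/bytes_set rather than assume both fields are filled. A usage sketch, assuming port_id, flow, and the COUNT action pointer already exist:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    static int
    query_hits(uint16_t port_id, struct rte_flow *flow,
               const struct rte_flow_action *count_action)
    {
        struct rte_flow_query_count qc = { .reset = 0 }; /* keep counting */
        struct rte_flow_error err;

        if (rte_flow_query(port_id, flow, count_action, &qc, &err) != 0)
            return -1;
        if (qc.hits_set)
            printf("hits: %" PRIu64 "\n", qc.hits);
        /* qc.bytes_set is 0 here: byte counts are not supported. */
        return 0;
    }
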
@@ -265,10 +275,10 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
        struct hns3_hw *hw = &hns->hw;
 
        queue = (const struct rte_flow_action_queue *)action->conf;
-       if (queue->index >= hw->used_rx_queues) {
+       if (queue->index >= hw->data->nb_rx_queues) {
                hns3_err(hw, "queue ID(%u) is greater than number of "
                          "available queue (%u) in driver.",
-                         queue->index, hw->used_rx_queues);
+                         queue->index, hw->data->nb_rx_queues);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          action, "Invalid queue ID in PF");
@@ -298,8 +308,8 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev,
 
        if ((!rte_is_power_of_2(conf->queue_num)) ||
                conf->queue_num > hw->rss_size_max ||
-               conf->queue[0] >= hw->used_rx_queues ||
-               conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
+               conf->queue[0] >= hw->data->nb_rx_queues ||
+               conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                        "Invalid start queue ID and queue num! the start queue "
@@ -700,6 +710,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;
@@ -1045,37 +1056,37 @@ hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
                step_mngr->items = L2_next_items;
-               step_mngr->count = ARRAY_SIZE(L2_next_items);
+               step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                ret = hns3_parse_vlan(item, rule, error);
                step_mngr->items = L2_next_items;
-               step_mngr->count = ARRAY_SIZE(L2_next_items);
+               step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                ret = hns3_parse_ipv4(item, rule, error);
                step_mngr->items = L3_next_items;
-               step_mngr->count = ARRAY_SIZE(L3_next_items);
+               step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                ret = hns3_parse_ipv6(item, rule, error);
                step_mngr->items = L3_next_items;
-               step_mngr->count = ARRAY_SIZE(L3_next_items);
+               step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                ret = hns3_parse_tcp(item, rule, error);
                step_mngr->items = L4_next_items;
-               step_mngr->count = ARRAY_SIZE(L4_next_items);
+               step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                ret = hns3_parse_udp(item, rule, error);
                step_mngr->items = L4_next_items;
-               step_mngr->count = ARRAY_SIZE(L4_next_items);
+               step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
                ret = hns3_parse_sctp(item, rule, error);
                step_mngr->items = L4_next_items;
-               step_mngr->count = ARRAY_SIZE(L4_next_items);
+               step_mngr->count = RTE_DIM(L4_next_items);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
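
The ARRAY_SIZE() to RTE_DIM() conversion is purely mechanical: RTE_DIM() is the element-count macro DPDK already exports from rte_common.h, so the driver drops its private duplicate. For reference:

    #include <stddef.h>
    #include <rte_common.h>

    static const int l2_next[] = { 10, 20, 30 };
    /* RTE_DIM(a) expands to sizeof(a) / sizeof((a)[0]); here that is 3,
     * evaluated at compile time exactly like the old ARRAY_SIZE().
     */
    static const size_t l2_next_count = RTE_DIM(l2_next);
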
@@ -1117,49 +1128,35 @@ is_tunnel_packet(enum rte_flow_item_type type)
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
-           type == RTE_FLOW_ITEM_TYPE_GENEVE ||
-           type == RTE_FLOW_ITEM_TYPE_MPLS)
+           type == RTE_FLOW_ITEM_TYPE_GENEVE)
                return true;
        return false;
 }
 
 /*
- * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
- * And get the flow director filter info BTW.
- * UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4 or IPV6
- * The second not void item must be IPV4 or IPV6 if the first one is ETH.
- * The next not void item could be UDP or TCP or SCTP (optional)
- * The next not void item could be RAW (for flexbyte, optional)
- * The next not void item must be END.
- * A Fuzzy Match pattern can appear at any place before END.
- * Fuzzy Match is optional for IPV4 but is required for IPV6
- * MAC VLAN PATTERN:
- * The first not void item must be ETH.
- * The second not void item must be MAC VLAN.
- * The next not void item must be END.
- * ACTION:
- * The first not void action should be QUEUE or DROP.
- * The second not void optional action should be MARK,
- * mark_id is a uint32_t number.
- * The next not void action should be END.
- * UDP/TCP/SCTP pattern example:
- * ITEM                Spec                    Mask
- * ETH         NULL                    NULL
- * IPV4                src_addr 192.168.1.20   0xFFFFFFFF
- *             dst_addr 192.167.3.50   0xFFFFFFFF
- * UDP/TCP/SCTP        src_port        80      0xFFFF
- *             dst_port        80      0xFFFF
- * END
- * MAC VLAN pattern example:
- * ITEM                Spec                    Mask
- * ETH         dst_addr
-               {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
-               0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
- * MAC VLAN    tci     0x2016          0xEFFF
- * END
- * Other members in mask and spec should set to 0x00.
- * Item->last should be NULL.
+ * Parse the flow director rule.
+ * The supported PATTERN:
+ *   case: non-tunnel packet:
+ *     ETH : src-mac, dst-mac, ethertype
+ *     VLAN: tag1, tag2
+ *     IPv4: src-ip, dst-ip, tos, proto
+ *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
+ *     UDP : src-port, dst-port
+ *     TCP : src-port, dst-port
+ *     SCTP: src-port, dst-port, tag
+ *   case: tunnel packet:
+ *     OUTER-ETH: ethertype
+ *     OUTER-L3 : proto
+ *     OUTER-L4 : src-port, dst-port
+ *     TUNNEL   : vni, flow-id(only valid when NVGRE)
+ *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
+ * The supported ACTION:
+ *    QUEUE
+ *    DROP
+ *    COUNT
+ *    MARK: the id range [0, 4094]
+ *    FLAG
+ *    RSS: only valid when firmware supports FD_QUEUE_REGION.
  */
 static int
 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
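
To make the rewritten grammar concrete, here is a hedged sketch of a non-tunnel rule it accepts: ETH / IPV4 / UDP matching a source address, steered with QUEUE plus COUNT and MARK (all addresses, ids, and indices are placeholders; the MARK id stays inside the documented [0, 4094] range):

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>
    #include <rte_ip.h>

    static const struct rte_flow_attr attr = { .ingress = 1 };

    static const struct rte_flow_item_ipv4 ip_spec = {
        .hdr = { .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)) },
    };
    static const struct rte_flow_item_ipv4 ip_mask = {
        .hdr = { .src_addr = RTE_BE32(UINT32_MAX) },
    };

    static const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ip_spec, .mask = &ip_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };

    static const struct rte_flow_action_queue queue = { .index = 1 };
    static const struct rte_flow_action_count count = { .id = 0 };
    static const struct rte_flow_action_mark mark = { .id = 0x123 };
    static const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
        { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
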
@@ -1180,7 +1177,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                                          "Fdir not supported in VF");
 
        step_mngr.items = first_items;
-       step_mngr.count = ARRAY_SIZE(first_items);
+       step_mngr.count = RTE_DIM(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
@@ -1194,7 +1191,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                        if (ret)
                                return ret;
                        step_mngr.items = tunnel_next_items;
-                       step_mngr.count = ARRAY_SIZE(tunnel_next_items);
+                       step_mngr.count = RTE_DIM(tunnel_next_items);
                } else {
                        ret = hns3_parse_normal(item, rule, &step_mngr, error);
                        if (ret)
@@ -1205,45 +1202,34 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
        return hns3_handle_actions(dev, actions, rule, error);
 }
 
-void
-hns3_filterlist_init(struct rte_eth_dev *dev)
-{
-       struct hns3_process_private *process_list = dev->process_private;
-
-       TAILQ_INIT(&process_list->fdir_list);
-       TAILQ_INIT(&process_list->filter_rss_list);
-       TAILQ_INIT(&process_list->flow_list);
-}
-
 static void
 hns3_filterlist_flush(struct rte_eth_dev *dev)
 {
-       struct hns3_process_private *process_list = dev->process_private;
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;
 
-       fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+       fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        while (fdir_rule_ptr) {
-               TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+               TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
-               fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+               fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        }
 
-       rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+       rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        while (rss_filter_ptr) {
-               TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
-                            entries);
+               TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                rte_free(rss_filter_ptr);
-               rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+               rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        }
 
-       flow_node = TAILQ_FIRST(&process_list->flow_list);
+       flow_node = TAILQ_FIRST(&hw->flow_list);
        while (flow_node) {
-               TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+               TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
                rte_free(flow_node->flow);
                rte_free(flow_node);
-               flow_node = TAILQ_FIRST(&process_list->flow_list);
+               flow_node = TAILQ_FIRST(&hw->flow_list);
        }
 }
 
@@ -1264,7 +1250,7 @@ hns3_action_rss_same(const struct rte_flow_action_rss *comp,
        if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
                func_is_same = false;
        else
-               func_is_same = (with->func ? (comp->func == with->func) : true);
+               func_is_same = with->func ? (comp->func == with->func) : true;
 
        return (func_is_same &&
                comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
@@ -1446,7 +1432,7 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
                *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
                break;
        default:
-               hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
+               hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
                         algo_func);
                return -EINVAL;
        }
@@ -1488,14 +1474,14 @@ hns3_update_indir_table(struct rte_eth_dev *dev,
 {
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
-       uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
+       uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
        uint16_t j;
        uint32_t i;
 
        /* Fill in redirection table */
        memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
               sizeof(hw->rss_info.rss_indirection_tbl));
-       for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
+       for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
                j %= num;
                if (conf->queue[j] >= hw->alloc_rss_size) {
                        hns3_err(hw, "queue id(%u) set to redirection table "
@@ -1506,14 +1492,13 @@ hns3_update_indir_table(struct rte_eth_dev *dev,
                indir_tbl[i] = conf->queue[j];
        }
 
-       return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
+       return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
 }
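
The loop distributes the rule's queue list round-robin over the whole redirection table, whose length now comes from hw->rss_ind_tbl_size instead of a compile-time constant. The fill pattern in isolation (helper name is mine, bounds checks dropped):

    #include <stdint.h>

    static void
    fill_indir_tbl(uint16_t *tbl, uint16_t tbl_size,
                   const uint16_t *queues, uint16_t num)
    {
        uint16_t i, j;

        /* Walk the table once, cycling through the queue list. */
        for (i = 0, j = 0; i < tbl_size; i++, j++) {
            j %= num;           /* wrap back to the first queue */
            tbl[i] = queues[j];
        }
    }
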
 
 static int
 hns3_config_rss_filter(struct rte_eth_dev *dev,
                       const struct hns3_rss_conf *conf, bool add)
 {
-       struct hns3_process_private *process_list = dev->process_private;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_hw *hw = &hns->hw;
@@ -1539,7 +1524,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
                     hw->rss_info.conf.types;
        if (flow_types != rss_flow_conf.types)
                hns3_warn(hw, "modified RSS types based on hardware support, "
-                             "requested:%" PRIx64 " configured:%" PRIx64,
+                             "requested:0x%" PRIx64 " configured:0x%" PRIx64,
                          rss_flow_conf.types, flow_types);
        /* Update the useful flow types */
        rss_flow_conf.types = flow_types;
@@ -1598,7 +1583,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
         * When create a new RSS rule, the old rule will be overlaid and set
         * invalid.
         */
-       TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
+       TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
                rss_filter_ptr->filter_info.valid = false;
 
 rss_config_err:
@@ -1610,7 +1595,6 @@ rss_config_err:
 static int
 hns3_clear_rss_filter(struct rte_eth_dev *dev)
 {
-       struct hns3_process_private *process_list = dev->process_private;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_hw *hw = &hns->hw;
@@ -1618,10 +1602,9 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev)
        int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
        int ret = 0;
 
-       rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+       rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        while (rss_filter_ptr) {
-               TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
-                            entries);
+               TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
                                             false);
                if (ret)
@@ -1629,7 +1612,7 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev)
                else
                        rss_rule_succ_cnt++;
                rte_free(rss_filter_ptr);
-               rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+               rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        }
 
        if (rss_rule_fail_cnt) {
@@ -1733,7 +1716,6 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
 {
-       struct hns3_process_private *process_list = dev->process_private;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        const struct hns3_rss_conf *rss_conf;
@@ -1765,7 +1747,7 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        }
 
        flow_node->flow = flow;
-       TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
+       TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
 
        act = hns3_find_rss_general_action(pattern, actions);
        if (act) {
@@ -1787,8 +1769,7 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
                                   &rss_conf->conf);
                rss_filter_ptr->filter_info.valid = true;
-               TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
-                                 rss_filter_ptr, entries);
+               TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
 
                flow->rule = rss_filter_ptr;
                flow->filter_type = RTE_ETH_FILTER_HASH;
@@ -1808,36 +1789,36 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 
                flow->counter_id = fdir_rule.act_cnt.id;
        }
+
+       fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
+                                   sizeof(struct hns3_fdir_rule_ele),
+                                   0);
+       if (fdir_rule_ptr == NULL) {
+               hns3_err(hw, "failed to allocate fdir_rule memory.");
+               ret = -ENOMEM;
+               goto err_fdir;
+       }
+
        ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
        if (!ret) {
-               fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
-                                           sizeof(struct hns3_fdir_rule_ele),
-                                           0);
-               if (fdir_rule_ptr == NULL) {
-                       hns3_err(hw, "Failed to allocate fdir_rule memory");
-                       ret = -ENOMEM;
-                       goto err_fdir;
-               }
-
                memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
                        sizeof(struct hns3_fdir_rule));
-               TAILQ_INSERT_TAIL(&process_list->fdir_list,
-                                 fdir_rule_ptr, entries);
+               TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                flow->rule = fdir_rule_ptr;
                flow->filter_type = RTE_ETH_FILTER_FDIR;
 
                return flow;
        }
 
+       rte_free(fdir_rule_ptr);
 err_fdir:
        if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
                hns3_counter_release(dev, fdir_rule.act_cnt.id);
-
 err:
        rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow");
 out:
-       TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+       TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
        rte_free(flow_node);
        rte_free(flow);
        return NULL;
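
Hoisting the rte_zmalloc() above hns3_fdir_filter_program() closes a cleanup hole: before, an allocation failure after the hardware had accepted the rule left a programmed filter with no tracking entry. The reordering is the usual acquire-then-commit pattern, sketched here with program_hw() standing in for the hardware call:

    #include <errno.h>
    #include <rte_malloc.h>

    int program_hw(void *rule);    /* stand-in for the HW programming step */

    static int
    add_rule(void *rule)
    {
        void *node;
        int ret;

        /* Acquire the cheap, easily reverted resource first. */
        node = rte_zmalloc("rule node", 64, 0);
        if (node == NULL)
            return -ENOMEM;    /* hardware untouched, nothing to undo */

        /* Commit to hardware last; on failure only the node is freed. */
        ret = program_hw(rule);
        if (ret != 0) {
            rte_free(node);
            return ret;
        }
        /* Success: node would now be linked into the rule list. */
        return 0;
    }
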
@@ -1848,19 +1829,20 @@ static int
 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
 {
-       struct hns3_process_private *process_list = dev->process_private;
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;
        enum rte_filter_type filter_type;
        struct hns3_fdir_rule fdir_rule;
+       struct hns3_hw *hw = &hns->hw;
        int ret;
 
        if (flow == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "Flow is NULL");
+
        filter_type = flow->filter_type;
        switch (filter_type) {
        case RTE_ETH_FILTER_FDIR:
@@ -1876,7 +1858,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                  "Destroy FDIR fail.Try again");
                if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
                        hns3_counter_release(dev, fdir_rule.act_cnt.id);
-               TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+               TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = NULL;
                break;
@@ -1889,8 +1871,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                  RTE_FLOW_ERROR_TYPE_HANDLE,
                                                  flow,
                                                  "Destroy RSS fail.Try again");
-               TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
-                            entries);
+               TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                rte_free(rss_filter_ptr);
                rss_filter_ptr = NULL;
                break;
@@ -1900,10 +1881,9 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                                          "Unsupported filter type");
        }
 
-       TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
+       TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
                if (flow_node->flow == flow) {
-                       TAILQ_REMOVE(&process_list->flow_list, flow_node,
-                                    entries);
+                       TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
                        rte_free(flow_node);
                        flow_node = NULL;
                        break;
@@ -1992,43 +1972,127 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
        return 0;
 }
 
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+                     const struct rte_flow_item pattern[],
+                     const struct rte_flow_action actions[],
+                     struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_flow *flow;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       flow = hns3_flow_create(dev, attr, pattern, actions, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+                      struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_destroy(dev, flow, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_flush(dev, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+                    const struct rte_flow_action *actions, void *data,
+                    struct rte_flow_error *error)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       pthread_mutex_lock(&hw->flows_lock);
+       ret = hns3_flow_query(dev, flow, actions, data, error);
+       pthread_mutex_unlock(&hw->flows_lock);
+
+       return ret;
+}
+
 static const struct rte_flow_ops hns3_flow_ops = {
-       .validate = hns3_flow_validate,
-       .create = hns3_flow_create,
-       .destroy = hns3_flow_destroy,
-       .flush = hns3_flow_flush,
-       .query = hns3_flow_query,
+       .validate = hns3_flow_validate_wrap,
+       .create = hns3_flow_create_wrap,
+       .destroy = hns3_flow_destroy_wrap,
+       .flush = hns3_flow_flush_wrap,
+       .query = hns3_flow_query_wrap,
        .isolate = NULL,
 };
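
Each wrapper serializes on hw->flows_lock; setting RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE in hns3_flow_init() below then tells the ethdev layer to skip its generic per-port lock, leaving the PMD's own mutex as the single lock taken. A simplified sketch of the ethdev-side decision (modeled on fts_enter() in lib/ethdev's rte_flow.c of this era, not the verbatim implementation; the internal header name varies by release):

    #include <pthread.h>
    #include <ethdev_driver.h>    /* struct rte_eth_dev, internal header */

    static void
    flow_ops_enter(struct rte_eth_dev *dev)
    {
        /* PMDs that declare thread-safe flow ops do their own locking. */
        if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
            pthread_mutex_lock(&dev->data->flow_ops_mutex);
    }
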
 
-/*
- * The entry of flow API.
- * @param dev
- *   Pointer to Ethernet device.
- * @return
- *   0 on success, a negative errno value otherwise is set.
- */
 int
-hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op, void *arg)
+hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
+                     const struct rte_flow_ops **ops)
 {
        struct hns3_hw *hw;
-       int ret = 0;
 
        hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               if (hw->adapter_state >= HNS3_NIC_CLOSED)
-                       return -ENODEV;
-               *(const void **)arg = &hns3_flow_ops;
-               break;
-       default:
-               hns3_err(hw, "Filter type (%d) not supported", filter_type);
-               ret = -EOPNOTSUPP;
-               break;
-       }
+       if (hw->adapter_state >= HNS3_NIC_CLOSED)
+               return -ENODEV;
 
-       return ret;
+       *ops = &hns3_flow_ops;
+       return 0;
+}
+
+void
+hns3_flow_init(struct rte_eth_dev *dev)
+{
+       struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       pthread_mutexattr_t attr;
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return;
+
+       pthread_mutexattr_init(&attr);
+       pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+       pthread_mutex_init(&hw->flows_lock, &attr);
+       dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+
+       TAILQ_INIT(&hw->flow_fdir_list);
+       TAILQ_INIT(&hw->flow_rss_list);
+       TAILQ_INIT(&hw->flow_list);
+}
+
+void
+hns3_flow_uninit(struct rte_eth_dev *dev)
+{
+       struct rte_flow_error error;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               hns3_flow_flush_wrap(dev, &error);
 }