net/hns3: fix rollback on RSS hash update
[dpdk.git] drivers/net/hns3/hns3_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_flow.h"

/* Default RSS hash key */
static uint8_t hns3_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special Filter id for non-specific packet flagging. Don't change value */
#define HNS3_MAX_FILTER_ID      0x0FFF

#define ETHER_TYPE_MASK         0xFFFF
#define IPPROTO_MASK            0xFF
#define TUNNEL_TYPE_MASK        0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE          0x6558

static enum rte_flow_item_type first_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type L2_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_GENEVE,
        RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type tunnel_next_items[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN
};

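/*
 * Tracks which item types may legally follow the current pattern item:
 * "items" points at one of the *_next_items arrays above and "count" is
 * its length, so the parser can validate item ordering step by step.
 */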
struct items_step_mngr {
        enum rte_flow_item_type *items;
        int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. As we know, RSS is used to spread packets among several queues. The flow
 *    API provides the struct rte_flow_action_rss, and the user can configure
 *    its fields, such as func/level/types/key/queue, to control the RSS
 *    function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
 *    rule whose action is an RSS queues region.
 * 3. When the action is RSS, the following rule distinguishes the two:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, which
 *            indicates a queue region configuration.
 *    Otherwise: an RSS general action.
 */
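/*
 * Illustrative testpmd commands for the two cases above (examples only,
 * not part of this file; queue and type values are arbitrary):
 *   queue region: flow create 0 ingress pattern eth / end
 *                 actions rss queues 0 1 2 3 end / end
 *   general RSS:  flow create 0 ingress pattern ipv4 / end
 *                 actions rss types ipv4 end / end
 */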
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[])
{
        const struct rte_flow_action *act = NULL;
        const struct hns3_rss_conf *rss;
        bool have_eth = false;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        act = actions;
                        break;
                }
        }
        if (!act)
                return NULL;

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        have_eth = true;
                        break;
                }
        }

        rss = act->conf;
        if (have_eth && rss->conf.queue_num) {
                /*
                 * The pattern has ETH and the action's queue_num > 0,
                 * indicating this is a queue region configuration.
                 * Because queue region is implemented by FDIR + RSS in hns3
                 * hardware, it needs to enter the FDIR process, so return
                 * NULL here to avoid entering the RSS process.
                 */
                return NULL;
        }

        return act;
}

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_flow_counter *cnt;

        LIST_FOREACH(cnt, &pf->flow_counters, next) {
                if (cnt->id == id)
                        return cnt;
        }
        return NULL;
}

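/*
 * Create a flow counter with the given id, or take a reference on an
 * existing one. Reuse of an id is only allowed for indirect counters;
 * hardware counters are read-clear, so a fresh counter is read once to
 * reset it before use.
 */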
static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
                 struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_pf *pf = &hns->pf;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt) {
                if (!cnt->indirect || cnt->indirect != indirect)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                cnt,
                                "Counter id is in use, indirect flag does not match");
                /* Clear the indirect counter on first use. */
                if (cnt->indirect && cnt->ref_cnt == 1)
                        (void)hns3_get_count(hw, id, &value);
                cnt->ref_cnt++;
                return 0;
        }

        /* Clear the counter by read ops because the counter is read-clear */
        ret = hns3_get_count(hw, id, &value);
        if (ret)
                return rte_flow_error_set(error, EIO,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Clear counter failed!");

        cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
        if (cnt == NULL)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
                                          "Alloc mem for counter failed");
        cnt->id = id;
        cnt->indirect = indirect;
        cnt->ref_cnt = 1;
        cnt->hits = 0;
        LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
        return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_query_count *qc,
                   struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_flow_counter *cnt;
        uint64_t value;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir is not supported in VF");
        cnt = hns3_counter_lookup(dev, flow->counter_id);
        if (cnt == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Can't find counter id");

        ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
        if (ret) {
                rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Read counter failed.");
                return ret;
        }
        qc->hits_set = 1;
        qc->hits = value;
        qc->bytes_set = 0;
        qc->bytes = 0;

        return 0;
}

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_counter *cnt;

        cnt = hns3_counter_lookup(dev, id);
        if (cnt == NULL) {
                hns3_err(hw, "Can't find available counter to release");
                return -EINVAL;
        }
        cnt->ref_cnt--;
        if (cnt->ref_cnt == 0) {
                LIST_REMOVE(cnt, next);
                rte_free(cnt);
        }
        return 0;
}

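/*
 * Release all flow counters on device flush: non-indirect counters are
 * freed outright, while indirect counters (still referenced through the
 * indirect-action API) are reset and kept on the pf->flow_counters list.
 */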
static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        LIST_HEAD(counters, hns3_flow_counter) indir_counters;
        struct hns3_flow_counter *cnt_ptr;

        LIST_INIT(&indir_counters);
        cnt_ptr = LIST_FIRST(&pf->flow_counters);
        while (cnt_ptr) {
                LIST_REMOVE(cnt_ptr, next);
                if (cnt_ptr->indirect)
                        LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
                else
                        rte_free(cnt_ptr);
                cnt_ptr = LIST_FIRST(&pf->flow_counters);
        }

        /* Reset the indirect action and add to pf->flow_counters list. */
        cnt_ptr = LIST_FIRST(&indir_counters);
        while (cnt_ptr) {
                LIST_REMOVE(cnt_ptr, next);
                cnt_ptr->ref_cnt = 1;
                cnt_ptr->hits = 0;
                LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
                cnt_ptr = LIST_FIRST(&indir_counters);
        }
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
                         const struct rte_flow_action *action,
                         struct hns3_fdir_rule *rule,
                         struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_queue *queue;
        struct hns3_hw *hw = &hns->hw;

        queue = (const struct rte_flow_action_queue *)action->conf;
        if (queue->index >= hw->data->nb_rx_queues) {
                hns3_err(hw, "queue ID (%u) is greater than the number of "
                          "available queues (%u) in the driver.",
                          queue->index, hw->data->nb_rx_queues);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          action, "Invalid queue ID in PF");
        }

        rule->queue_id = queue->index;
        rule->nb_queues = 1;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}

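/*
 * Validate and apply a queue region action: the region must start at a
 * valid Rx queue, span a power-of-2 number of consecutive queues, and fit
 * within rss_size_max; e.g. queues 8..15 form a valid 8-queue region.
 */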
static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct hns3_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_rss *conf = action->conf;
        struct hns3_hw *hw = &hns->hw;
        uint16_t idx;

        if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
                return rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                        "Queue region configuration is not supported!");

        if ((!rte_is_power_of_2(conf->queue_num)) ||
                conf->queue_num > hw->rss_size_max ||
                conf->queue[0] >= hw->data->nb_rx_queues ||
                conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                        "Invalid start queue ID or queue num! The start queue "
                        "ID must be valid, and the queue num must be a power "
                        "of 2 and <= rss_size_max.");
        }

        for (idx = 1; idx < conf->queue_num; idx++) {
                if (conf->queue[idx] != conf->queue[idx - 1] + 1)
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
                                "Invalid queue ID sequence! Queue IDs "
                                "must increase consecutively.");
        }

        rule->queue_id = conf->queue[0];
        rule->nb_queues = conf->queue_num;
        rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
        return 0;
}

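/*
 * Attach an indirect (shared) COUNT action to the rule: the handle must
 * be of the indirect counter type and the counter must already have been
 * created through the indirect-action API.
 */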
static int
hns3_handle_action_indirect(struct rte_eth_dev *dev,
                            const struct rte_flow_action *action,
                            struct hns3_fdir_rule *rule,
                            struct rte_flow_error *error)
{
        const struct rte_flow_action_handle *indir = action->conf;

        if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                action, "Invalid indirect type");

        if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                action, "Counter id does not exist");

        rule->act_cnt.id = indir->counter_id;
        rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);

        return 0;
}

/*
 * Parse the actions list and fill the rule.
 * The actions are validated as they are converted.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
                    const struct rte_flow_action actions[],
                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_action_count *act_count;
        const struct rte_flow_action_mark *mark;
        struct hns3_pf *pf = &hns->pf;
        uint32_t counter_num;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = hns3_handle_action_queue(dev, actions, rule,
                                                       error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        rule->action = HNS3_FD_ACTION_DROP_PACKET;
                        break;
                /*
                 * Here the RSS action's real effect is a queue region.
                 * Queue region is implemented by FDIR + RSS in hns3 hardware:
                 * the FDIR action selects one queue region (start_queue_id
                 * and queue_num), then RSS spreads packets over that region
                 * using the RSS algorithm.
                 */
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = hns3_handle_action_queue_region(dev, actions,
                                                              rule, error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark =
                            (const struct rte_flow_action_mark *)actions->conf;
                        if (mark->id >= HNS3_MAX_FILTER_ID)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid Mark ID");
                        rule->fd_id = mark->id;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        rule->fd_id = HNS3_MAX_FILTER_ID;
                        rule->flags |= HNS3_RULE_FLAG_FDID;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        act_count =
                            (const struct rte_flow_action_count *)actions->conf;
                        counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
                        if (act_count->id >= counter_num)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                                actions,
                                                "Invalid counter id");
                        rule->act_cnt = *act_count;
                        rule->flags |= HNS3_RULE_FLAG_COUNTER;
                        rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
                        break;
                case RTE_FLOW_ACTION_TYPE_INDIRECT:
                        ret = hns3_handle_action_indirect(dev, actions, rule,
                                                          error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "Unsupported action");
                }
        }

        return 0;
}

static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
        if (!attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          attr, "Ingress can't be zero");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          attr, "Not support egress");
        if (attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          attr, "No support for transfer");
        if (attr->priority)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          attr, "Not support priority");
        if (attr->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          attr, "Not support group");
        return 0;
}

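/*
 * Note for the item parsers below: hns3_parse_normal() and
 * hns3_parse_tunnel() reject items that carry a mask without a spec, so
 * once a parser gets past its "spec == NULL && mask == NULL" check it may
 * dereference item->spec safely.
 */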
static int
hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error __rte_unused)
{
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                eth_mask = item->mask;
                if (eth_mask->type) {
                        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
                        rule->key_conf.mask.ether_type =
                            rte_be_to_cpu_16(eth_mask->type);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->src)) {
                        hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
                        memcpy(rule->key_conf.mask.src_mac,
                               eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
                if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
                        hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
                        memcpy(rule->key_conf.mask.dst_mac,
                               eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
                }
        }

        eth_spec = item->spec;
        rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
        memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
               RTE_ETHER_ADDR_LEN);
        return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;

        rule->key_conf.vlan_num++;
        if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Vlan_num is more than 2");

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                vlan_mask = item->mask;
                if (vlan_mask->tci) {
                        if (rule->key_conf.vlan_num == 1) {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
                                             1);
                                rule->key_conf.mask.vlan_tag1 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        } else {
                                hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
                                             1);
                                rule->key_conf.mask.vlan_tag2 =
                                    rte_be_to_cpu_16(vlan_mask->tci);
                        }
                }
        }

        vlan_spec = item->spec;
        if (rule->key_conf.vlan_num == 1)
                rule->key_conf.spec.vlan_tag1 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        else
                rule->key_conf.spec.vlan_tag2 =
                    rte_be_to_cpu_16(vlan_spec->tci);
        return 0;
}

static bool
hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
{
        if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum)
                return false;

        return true;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv4_mask = item->mask;
                if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip,tos,proto in IPV4");
                }

                if (ipv4_mask->hdr.src_addr) {
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                        rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
                }

                if (ipv4_mask->hdr.dst_addr) {
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                        rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
                            rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
                }

                if (ipv4_mask->hdr.type_of_service) {
                        hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
                        rule->key_conf.mask.ip_tos =
                            ipv4_mask->hdr.type_of_service;
                }

                if (ipv4_mask->hdr.next_proto_id) {
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
                        rule->key_conf.mask.ip_proto =
                            ipv4_mask->hdr.next_proto_id;
                }
        }

        ipv4_spec = item->spec;
        rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
        rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
            rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
        rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
        rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
        return 0;
}

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;

        hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
        rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
        rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                ipv6_mask = item->mask;
                if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.hop_limits) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst ip,proto in IPV6");
                }
                net_addr_to_host(rule->key_conf.mask.src_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
                                 IP_ADDR_LEN);
                net_addr_to_host(rule->key_conf.mask.dst_ip,
                                 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
                                 IP_ADDR_LEN);
                rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
                if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
                if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
                        hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
                if (ipv6_mask->hdr.proto)
                        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        }

        ipv6_spec = item->spec;
        net_addr_to_host(rule->key_conf.spec.src_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.src_addr,
                         IP_ADDR_LEN);
        net_addr_to_host(rule->key_conf.spec.dst_ip,
                         (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
                         IP_ADDR_LEN);
        rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

        return 0;
}

static bool
hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
{
        if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp)
                return false;

        return true;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_TCP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                tcp_mask = item->mask;
                if (!hns3_check_tcp_mask_supported(tcp_mask)) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in TCP");
                }

                if (tcp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.src_port);
                }
                if (tcp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
                }
        }

        tcp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
               struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_UDP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in UDP");
                }
                if (udp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(udp_mask->hdr.src_port);
                }
                if (udp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(udp_mask->hdr.dst_port);
                }
        }

        udp_spec = item->spec;
        rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

        return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                struct rte_flow_error *error)
{
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
        rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
        rule->key_conf.mask.ip_proto = IPPROTO_MASK;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        if (item->mask) {
                sctp_mask = item->mask;
                if (sctp_mask->hdr.cksum)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                                  item,
                                                  "Only support src & dst port in SCTP");
                if (sctp_mask->hdr.src_port) {
                        hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
                        rule->key_conf.mask.src_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.src_port);
                }
                if (sctp_mask->hdr.dst_port) {
                        hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
                        rule->key_conf.mask.dst_port =
                            rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
                }
                if (sctp_mask->hdr.tag) {
                        hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
                        rule->key_conf.mask.sctp_tag =
                            rte_be_to_cpu_32(sctp_mask->hdr.tag);
                }
        }

        sctp_spec = item->spec;
        rule->key_conf.spec.src_port =
            rte_be_to_cpu_16(sctp_spec->hdr.src_port);
        rule->key_conf.spec.dst_port =
            rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
        rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

        return 0;
}

/*
 * Check items before the tunnel item, save inner configs to outer configs,
 * and clear the inner configs.
 * The key consists of two parts: meta data and tuple keys.
 * Meta data uses 15 bits, including vlan_num(2bit), dst_port(12bit) and tunnel
 * packet(1bit).
 * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
 * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
 * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
 * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
 * vlantag2(16bit) and sctp-tag(32bit).
 */
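/*
 * Worked example: for the pattern "eth / ipv4 / udp / vxlan / eth / ipv4",
 * the items in front of VXLAN are first parsed into the inner fields; when
 * the VXLAN item is reached, hns3_handle_tunnel() moves ether_type,
 * ip_proto and src_port into their outer_* counterparts and clears the
 * inner fields so the items after the tunnel can reuse them. The inner
 * dst_port is simply cleared, since the outer destination port is used to
 * identify the tunnel type.
 */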
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
                   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
        /* check eth config */
        if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer eth mac is unsupported");
        if (rule->input_set & BIT(INNER_ETH_TYPE)) {
                hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
                rule->key_conf.spec.outer_ether_type =
                    rule->key_conf.spec.ether_type;
                rule->key_conf.mask.outer_ether_type =
                    rule->key_conf.mask.ether_type;
                hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
                rule->key_conf.spec.ether_type = 0;
                rule->key_conf.mask.ether_type = 0;
        }

        /* check vlan config */
        if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "Outer vlan tags are unsupported");

        /* clear vlan_num for inner vlan select */
        rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
        rule->key_conf.vlan_num = 0;

        /* check L3 config */
        if (rule->input_set &
            (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Outer ip is unsupported");
        if (rule->input_set & BIT(INNER_IP_PROTO)) {
                hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
                rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
                rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
                hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
                rule->key_conf.spec.ip_proto = 0;
                rule->key_conf.mask.ip_proto = 0;
        }

        /* check L4 config */
        if (rule->input_set & BIT(INNER_SCTP_TAG))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Outer sctp tag is unsupported");

        if (rule->input_set & BIT(INNER_SRC_PORT)) {
                hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
                rule->key_conf.spec.outer_src_port =
                    rule->key_conf.spec.src_port;
                rule->key_conf.mask.outer_src_port =
                    rule->key_conf.mask.src_port;
                hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
                rule->key_conf.spec.src_port = 0;
                rule->key_conf.mask.src_port = 0;
        }
        if (rule->input_set & BIT(INNER_DST_PORT)) {
                hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
                rule->key_conf.spec.dst_port = 0;
                rule->key_conf.mask.dst_port = 0;
        }
        return 0;
}

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
        else
                rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        vxlan_mask = item->mask;
        vxlan_spec = item->spec;

        if (vxlan_mask->flags)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Flags is not supported in VxLAN");

        /* VNI must be totally masked or not. */
        if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in VxLAN");
        if (vxlan_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
                       VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
               VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                 struct rte_flow_error *error)
{
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;

        hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
        rule->key_conf.spec.outer_proto = IPPROTO_GRE;
        rule->key_conf.mask.outer_proto = IPPROTO_MASK;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
        rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        nvgre_mask = item->mask;
        nvgre_spec = item->spec;

        if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in NVGRE");

        /* TNI must be totally masked or not. */
        if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "TNI must be totally masked or not in NVGRE");

        if (nvgre_mask->tni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
                       VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
               VNI_OR_TNI_LEN);

        if (nvgre_mask->flow_id) {
                hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
                rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
        }
        rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
        return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        const struct rte_flow_item_geneve *geneve_spec;
        const struct rte_flow_item_geneve *geneve_mask;

        hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
        rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
        rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
        /* Only used to describe the protocol stack. */
        if (item->spec == NULL && item->mask == NULL)
                return 0;

        geneve_mask = item->mask;
        geneve_spec = item->spec;

        if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "Ver/protocol is not supported in GENEVE");
        /* VNI must be totally masked or not. */
        if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
            memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
                                          "VNI must be totally masked or not in GENEVE");
        if (geneve_mask->vni[0]) {
                hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
                memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
                       VNI_OR_TNI_LEN);
        }
        memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
               VNI_OR_TNI_LEN);
        return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");
        else if (item->spec && (item->mask == NULL))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Tunnel packets must configure "
                                          "with mask");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                ret = hns3_parse_vxlan(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                ret = hns3_parse_nvgre(item, rule, error);
                break;
        case RTE_FLOW_ITEM_TYPE_GENEVE:
                ret = hns3_parse_geneve(item, rule, error);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported tunnel type!");
        }
        if (ret)
                return ret;
        return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
                  struct items_step_mngr *step_mngr,
                  struct rte_flow_error *error)
{
        int ret;

        if (item->spec == NULL && item->mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Can't configure FDIR with mask "
                                          "but without spec");

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                ret = hns3_parse_eth(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                ret = hns3_parse_vlan(item, rule, error);
                step_mngr->items = L2_next_items;
                step_mngr->count = RTE_DIM(L2_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                ret = hns3_parse_ipv4(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                ret = hns3_parse_ipv6(item, rule, error);
                step_mngr->items = L3_next_items;
                step_mngr->count = RTE_DIM(L3_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                ret = hns3_parse_tcp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                ret = hns3_parse_udp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
                ret = hns3_parse_sctp(item, rule, error);
                step_mngr->items = L4_next_items;
                step_mngr->count = RTE_DIM(L4_next_items);
                break;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "Unsupported normal type!");
        }

        return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
                   struct items_step_mngr step_mngr,
                   struct rte_flow_error *error)
{
        int i;

        if (item->last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
                                          "Not supported last point for range");

        for (i = 0; i < step_mngr.count; i++) {
                if (item->type == step_mngr.items[i])
                        break;
        }

        if (i == step_mngr.count) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "Invalid or missing item");
        }
        return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
        if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
            type == RTE_FLOW_ITEM_TYPE_VXLAN ||
            type == RTE_FLOW_ITEM_TYPE_NVGRE ||
            type == RTE_FLOW_ITEM_TYPE_GENEVE)
                return true;
        return false;
}

/*
 * Parse the flow director rule.
 * The supported PATTERN:
 *   case: non-tunnel packet:
 *     ETH : src-mac, dst-mac, ethertype
 *     VLAN: tag1, tag2
 *     IPv4: src-ip, dst-ip, tos, proto
 *     IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
 *     UDP : src-port, dst-port
 *     TCP : src-port, dst-port
 *     SCTP: src-port, dst-port, tag
 *   case: tunnel packet:
 *     OUTER-ETH: ethertype
 *     OUTER-L3 : proto
 *     OUTER-L4 : src-port, dst-port
 *     TUNNEL   : vni, flow-id(only valid for NVGRE)
 *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
 * The supported ACTION:
 *    QUEUE
 *    DROP
 *    COUNT
 *    MARK: the id range [0, 4094]
 *    FLAG
 *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
 * An illustrative example follows this comment.
 */
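/*
 * For instance, a rule matching TCP dst-port 80 on IPv4 and steering to
 * queue 2 could be created via testpmd as (example syntax only):
 *   flow create 0 ingress pattern eth / ipv4 / tcp dst is 80 / end
 *   actions queue index 2 / count / end
 */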
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct hns3_fdir_rule *rule,
                       struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        const struct rte_flow_item *item;
        struct items_step_mngr step_mngr;
        int ret;

        /* FDIR is available only in PF driver */
        if (hns->is_vf)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "Fdir not supported in VF");

        step_mngr.items = first_items;
        step_mngr.count = RTE_DIM(first_items);
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                ret = hns3_validate_item(item, step_mngr, error);
                if (ret)
                        return ret;

                if (is_tunnel_packet(item->type)) {
                        ret = hns3_parse_tunnel(item, rule, error);
                        if (ret)
                                return ret;
                        step_mngr.items = tunnel_next_items;
                        step_mngr.count = RTE_DIM(tunnel_next_items);
                } else {
                        ret = hns3_parse_normal(item, rule, &step_mngr, error);
                        if (ret)
                                return ret;
                }
        }

        return hns3_handle_actions(dev, actions, rule, error);
}

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;

        fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        while (fdir_rule_ptr) {
                TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        }

        rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        while (rss_filter_ptr) {
                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                rte_free(rss_filter_ptr);
                rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        }

        flow_node = TAILQ_FIRST(&hw->flow_list);
        while (flow_node) {
                TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
                rte_free(flow_node->flow);
                rte_free(flow_node);
                flow_node = TAILQ_FIRST(&hw->flow_list);
        }
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
                     const struct rte_flow_action_rss *with)
{
        bool rss_key_is_same;
        bool func_is_same;

1292         /*
1293          * When the user flushes all RSS rules, the RSS func is set to the
1294          * invalid value RTE_ETH_HASH_FUNCTION_MAX, so any valid RSS func in
1295          * a flow created after the flush compares as different. Otherwise,
1296          * when the user creates an RSS action whose func is
1297          * RTE_ETH_HASH_FUNCTION_DEFAULT, the func is treated as the same as
1298          * that of the previous RSS flow.
1299          */
1300         if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1301                 func_is_same = false;
1302         else
1303                 func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
1304                                 (comp->func == with->func) : true;
1305
1306         if (with->key_len == 0 || with->key == NULL)
1307                 rss_key_is_same = true;
1308         else
1309                 rss_key_is_same = comp->key_len == with->key_len &&
1310                                   !memcmp(comp->key, with->key, with->key_len);
1311
1312         return (func_is_same && rss_key_is_same &&
1313                 comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1314                 comp->level == with->level &&
1315                 comp->queue_num == with->queue_num &&
1316                 !memcmp(comp->queue, with->queue,
1317                         sizeof(*with->queue) * with->queue_num));
1318 }
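
/*
 * Illustrative sketch (editor's note, not upstream code): how the sentinel
 * func interacts with the comparison above; the values are assumptions chosen
 * for the example.
 *
 *     struct rte_flow_action_rss comp = { .func = RTE_ETH_HASH_FUNCTION_MAX };
 *     struct rte_flow_action_rss with = {
 *             .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *     };
 *
 *     // After a flush, comp.func holds the MAX sentinel, so func_is_same is
 *     // forced to false and the configurations never compare as duplicates.
 *     bool same = hns3_action_rss_same(&comp, &with);   // same == false
 */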
1319
1320 static int
1321 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1322                    const struct rte_flow_action_rss *in)
1323 {
1324         if (in->key_len > RTE_DIM(out->key) ||
1325             in->queue_num > RTE_DIM(out->queue))
1326                 return -EINVAL;
1327         if (in->key == NULL && in->key_len)
1328                 return -EINVAL;
1329         out->conf = (struct rte_flow_action_rss) {
1330                 .func = in->func,
1331                 .level = in->level,
1332                 .types = in->types,
1333                 .key_len = in->key_len,
1334                 .queue_num = in->queue_num,
1335         };
1336         out->conf.queue = memcpy(out->queue, in->queue,
1337                                 sizeof(*in->queue) * in->queue_num);
1338         if (in->key)
1339                 out->conf.key = memcpy(out->key, in->key, in->key_len);
1340
1341         return 0;
1342 }
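
/*
 * Illustrative sketch (editor's note, not upstream code): hns3_rss_conf_copy()
 * deep-copies the variable-length key/queue arrays into the fixed buffers
 * embedded in struct hns3_rss_conf, so the copy outlives the caller's arrays.
 * The values are assumptions.
 *
 *     static const uint16_t queues[] = { 0, 1, 2, 3 };
 *     const struct rte_flow_action_rss in = {
 *             .types = RTE_ETH_RSS_IP,
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 *     struct hns3_rss_conf out = { 0 };
 *
 *     if (hns3_rss_conf_copy(&out, &in) == 0)
 *             ;  // out.conf.queue now points at out.queue, not at queues[]
 */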
1343
1344 static bool
1345 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1346                                const struct rte_flow_action_rss *rss)
1347 {
1348         /*
1349          * For IP packets, the src/dst port fields cannot be used in the RSS
1350          * hash for the following packet types:
1351          * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1352          * In addition, on Kunpeng920 the NIC hardware cannot use the src/dst
1353          * port fields in the RSS hash for the IPv6 SCTP packet type, whereas
1354          * Kunpeng930 and later Kunpeng series do support hashing on src/dst
1355          * ports for IPv6 SCTP packets.
1356          */
1357         if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
1358             (rss->types & RTE_ETH_RSS_IP ||
1359             (!hw->rss_info.ipv6_sctp_offload_supported &&
1360             rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
1361                 return false;
1362
1363         return true;
1364 }
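
/*
 * Illustrative sketch (editor's note, not upstream code): type masks accepted
 * and rejected by the check above, assuming hardware without IPv6 SCTP
 * port-hash support (ipv6_sctp_offload_supported == false).
 *
 *     // Rejected: an L4 port selector combined with an IP (non-L4) type.
 *     uint64_t bad = RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L4_SRC_ONLY;
 *
 *     // Rejected on Kunpeng920: a port selector with IPv6 SCTP.
 *     uint64_t bad920 = RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY;
 *
 *     // Accepted: a port selector with a TCP type.
 *     uint64_t ok = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY;
 */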
1365
1366 /*
1367  * Parse and validate the RSS action of a flow rule.
1368  */
1369 static int
1370 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1371                       const struct rte_flow_action *actions,
1372                       struct rte_flow_error *error)
1373 {
1374         struct hns3_adapter *hns = dev->data->dev_private;
1375         struct hns3_hw *hw = &hns->hw;
1376         struct hns3_rss_conf *rss_conf = &hw->rss_info;
1377         const struct rte_flow_action_rss *rss;
1378         const struct rte_flow_action *act;
1379         uint32_t act_index = 0;
1380         uint16_t n;
1381
1382         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1383         rss = act->conf;
1384
1385         if (rss == NULL) {
1386                 return rte_flow_error_set(error, EINVAL,
1387                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1388                                           act, "no valid queues");
1389         }
1390
1391         if (rss->queue_num > RTE_DIM(rss_conf->queue))
1392                 return rte_flow_error_set(error, ENOTSUP,
1393                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1394                                           "number of queues configured exceeds "
1395                                           "the queue buffer size the driver supports");
1396
1397         for (n = 0; n < rss->queue_num; n++) {
1398                 if (rss->queue[n] < hw->alloc_rss_size)
1399                         continue;
1400                 return rte_flow_error_set(error, EINVAL,
1401                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1402                                           "queue id must be less than queue number allocated to a TC");
1403         }
1404
1405         if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1406                 return rte_flow_error_set(error, EINVAL,
1407                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1408                                           act,
1409                                           "Flow types are unsupported by "
1410                                           "hns3's RSS");
1411         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1412                 return rte_flow_error_set(error, ENOTSUP,
1413                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1414                                           "RSS hash func is not supported");
1415         if (rss->level)
1416                 return rte_flow_error_set(error, ENOTSUP,
1417                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1418                                           "a nonzero RSS encapsulation level is not supported");
1419         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1420                 return rte_flow_error_set(error, ENOTSUP,
1421                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1422                                           "RSS hash key must be exactly 40 bytes");
1423
1424         if (!hns3_rss_input_tuple_supported(hw, rss))
1425                 return rte_flow_error_set(error, EINVAL,
1426                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1427                                           &rss->types,
1428                                           "input RSS types are not supported");
1429
1430         act_index++;
1431
1432         /* Check that the next non-void action is END */
1433         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1434         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1435                 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1436                 return rte_flow_error_set(error, EINVAL,
1437                                           RTE_FLOW_ERROR_TYPE_ACTION,
1438                                           act, "Not supported action.");
1439         }
1440
1441         return 0;
1442 }
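
/*
 * Illustrative sketch (editor's note, not upstream code): an RSS action that
 * passes the checks above, e.g. created from testpmd with something like
 *
 *     flow create 0 ingress pattern end actions rss types ipv4-tcp end \
 *          queues 0 1 2 3 end / end
 *
 * which reaches this parser roughly as:
 *
 *     static const uint16_t queues[] = { 0, 1, 2, 3 };
 *     const struct rte_flow_action_rss rss = {
 *             .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *             .types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 */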
1443
1444 static int
1445 hns3_disable_rss(struct hns3_hw *hw)
1446 {
1447         int ret;
1448
1449         ret = hns3_set_rss_tuple_by_rss_hf(hw, &hw->rss_info.rss_tuple_sets, 0);
1450         if (ret)
1451                 return ret;
1452         hw->rss_dis_flag = true;
1453
1454         return 0;
1455 }
1456
1457 static void
1458 hns3_adjust_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1459 {
1460         if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1461                 hns3_warn(hw, "Using the default RSS hash key");
1462                 rss_conf->key = hns3_hash_key;
1463                 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1464         }
1465 }
1466
1467 static int
1468 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1469                          uint8_t *hash_algo)
1470 {
1471         enum rte_eth_hash_function algo_func = *func;

1472         switch (algo_func) {
1473         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1474                 /* Keep *hash_algo as what it used to be */
1475                 algo_func = hw->rss_info.conf.func;
1476                 break;
1477         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1478                 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1479                 break;
1480         case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1481                 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1482                 break;
1483         case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1484                 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1485                 break;
1486         default:
1487                 hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1488                          algo_func);
1489                 return -EINVAL;
1490         }
1491         *func = algo_func;
1492
1493         return 0;
1494 }
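
/*
 * Illustrative sketch (editor's note, not upstream code): usage of the mapping
 * above. RTE_ETH_HASH_FUNCTION_DEFAULT resolves to the currently cached func
 * rather than a hardware algorithm, so repeated "default" requests keep
 * whatever was last programmed.
 *
 *     uint8_t algo = hw->rss_info.hash_algo;
 *     enum rte_eth_hash_function func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
 *
 *     if (hns3_parse_rss_algorithm(hw, &func, &algo) == 0)
 *             ;  // algo == HNS3_RSS_HASH_ALGO_SIMPLE, func unchanged
 */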
1495
1496 static int
1497 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1498 {
1499         struct hns3_rss_tuple_cfg *tuple;
1500         int ret;
1501
1502         hns3_adjust_rss_key(hw, rss_config);
1503
1504         ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1505                                        &hw->rss_info.hash_algo);
1506         if (ret)
1507                 return ret;
1508
1509         ret = hns3_rss_set_algo_key(hw, rss_config->key);
1510         if (ret)
1511                 return ret;
1512
1513         hw->rss_info.conf.func = rss_config->func;
1514
1515         tuple = &hw->rss_info.rss_tuple_sets;
1516         ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1517         if (ret)
1518                 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1519
1520         return ret;
1521 }
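
/*
 * Editor's note: the ordering above matters for rollback. The cached
 * hw->rss_info.conf.func is updated only after the key and algorithm have
 * been programmed successfully, so a failure in hns3_rss_set_algo_key()
 * leaves the cached func describing what the hardware still uses.
 */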
1522
1523 static int
1524 hns3_update_indir_table(struct rte_eth_dev *dev,
1525                         const struct rte_flow_action_rss *conf, uint16_t num)
1526 {
1527         struct hns3_adapter *hns = dev->data->dev_private;
1528         struct hns3_hw *hw = &hns->hw;
1529         uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1530         uint16_t j;
1531         uint32_t i;
1532
1533         /* Fill in redirection table */
1534         memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1535                sizeof(hw->rss_info.rss_indirection_tbl));
1536         for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1537                 j %= num;
1538                 if (conf->queue[j] >= hw->alloc_rss_size) {
1539                         hns3_err(hw, "queue id(%u) set to redirection table "
1540                                  "exceeds queue number(%u) allocated to a TC.",
1541                                  conf->queue[j], hw->alloc_rss_size);
1542                         return -EINVAL;
1543                 }
1544                 indir_tbl[i] = conf->queue[j];
1545         }
1546
1547         return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1548 }
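
/*
 * Illustrative sketch (editor's note, not upstream code): the loop above
 * replicates the queue list round-robin across the redirection table. With
 * the assumed values num = 3, conf->queue = { 2, 3, 5 } and an 8-entry
 * table, the result would be:
 *
 *     indir_tbl[] = { 2, 3, 5, 2, 3, 5, 2, 3 };
 */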
1549
1550 static int
1551 hns3_config_rss_filter(struct rte_eth_dev *dev,
1552                        const struct hns3_rss_conf *conf, bool add)
1553 {
1554         struct hns3_adapter *hns = dev->data->dev_private;
1555         struct hns3_rss_conf_ele *rss_filter_ptr;
1556         struct hns3_hw *hw = &hns->hw;
1557         struct hns3_rss_conf *rss_info;
1558         uint64_t flow_types;
1559         uint16_t num;
1560         int ret;
1561
1562         struct rte_flow_action_rss rss_flow_conf = {
1563                 .func = conf->conf.func,
1564                 .level = conf->conf.level,
1565                 .types = conf->conf.types,
1566                 .key_len = conf->conf.key_len,
1567                 .queue_num = conf->conf.queue_num,
1568                 .key = conf->conf.key_len ?
1569                     (void *)(uintptr_t)conf->conf.key : NULL,
1570                 .queue = conf->conf.queue,
1571         };
1572
1573         /* Filter the unsupported flow types */
1574         flow_types = conf->conf.types ?
1575                      rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1576                      hw->rss_info.conf.types;
1577         if (flow_types != rss_flow_conf.types)
1578                 hns3_warn(hw, "modified RSS types based on hardware support, "
1579                               "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1580                           rss_flow_conf.types, flow_types);
1581         /* Update the useful flow types */
1582         rss_flow_conf.types = flow_types;
1583
1584         rss_info = &hw->rss_info;
1585         if (!add) {
1586                 if (!conf->valid)
1587                         return 0;
1588
1589                 ret = hns3_disable_rss(hw);
1590                 if (ret) {
1591                         hns3_err(hw, "RSS disable failed(%d)", ret);
1592                         return ret;
1593                 }
1594
1595                 if (rss_flow_conf.queue_num) {
1596                         /*
1597                          * Since the queue array contents have been reset to
1598                          * 0, rss_info->conf.queue should be set to NULL.
1599                          */
1600                         rss_info->conf.queue = NULL;
1601                         rss_info->conf.queue_num = 0;
1602                 }
1603
1604                 /* Mark the RSS func invalid after the flush */
1605                 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1606                 return 0;
1607         }
1608
1609         /* Set Rx queues to use */
1610         num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1611         if (rss_flow_conf.queue_num > num)
1612                 hns3_warn(hw, "Requested queue number %u exceeds the Rx queue count and is truncated",
1613                           rss_flow_conf.queue_num);
1614         hns3_info(hw, "At most %u contiguous PF queues are configured", num);
1615
1616         rte_spinlock_lock(&hw->lock);
1617         if (num) {
1618                 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1619                 if (ret)
1620                         goto rss_config_err;
1621         }
1622
1623         /* Set hash algorithm and flow types by the user's config */
1624         ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1625         if (ret)
1626                 goto rss_config_err;
1627
1628         ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1629         if (ret) {
1630                 hns3_err(hw, "RSS config init fail(%d)", ret);
1631                 goto rss_config_err;
1632         }
1633
1634         /*
1635          * When a new RSS rule is created, the old rules are overridden and
1636          * marked invalid.
1637          */
1638         TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
1639                 rss_filter_ptr->filter_info.valid = false;
1640
1641 rss_config_err:
1642         rte_spinlock_unlock(&hw->lock);
1643
1644         return ret;
1645 }
1646
1647 static int
1648 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1649 {
1650         struct hns3_adapter *hns = dev->data->dev_private;
1651         struct hns3_rss_conf_ele *rss_filter_ptr;
1652         struct hns3_hw *hw = &hns->hw;
1653         int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1654         int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1655         int ret = 0;
1656
1657         rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1658         while (rss_filter_ptr) {
1659                 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1660                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1661                                              false);
1662                 if (ret)
1663                         rss_rule_fail_cnt++;
1664                 else
1665                         rss_rule_succ_cnt++;
1666                 rte_free(rss_filter_ptr);
1667                 rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1668         }
1669
1670         if (rss_rule_fail_cnt) {
1671                 hns3_err(hw, "failed to delete all RSS filters, success num = %d "
1672                              "fail num = %d", rss_rule_succ_cnt,
1673                              rss_rule_fail_cnt);
1674                 ret = -EIO;
1675         }
1676
1677         return ret;
1678 }
1679
1680 int
1681 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1682 {
1683         struct hns3_adapter *hns = dev->data->dev_private;
1684         struct hns3_hw *hw = &hns->hw;
1685
1686         /* When the user flushes all rules, there is no RSS rule to restore */
1687         if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1688                 return 0;
1689
1690         return hns3_config_rss_filter(dev, &hw->rss_info, true);
1691 }
1692
1693 static int
1694 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1695                     const struct hns3_rss_conf *conf, bool add)
1696 {
1697         struct hns3_adapter *hns = dev->data->dev_private;
1698         struct hns3_hw *hw = &hns->hw;
1699         bool ret;
1700
1701         ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1702         if (ret) {
1703                 hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
1704                 return -EINVAL;
1705         }
1706
1707         return hns3_config_rss_filter(dev, conf, add);
1708 }
1709
1710 static int
1711 hns3_flow_args_check(const struct rte_flow_attr *attr,
1712                      const struct rte_flow_item pattern[],
1713                      const struct rte_flow_action actions[],
1714                      struct rte_flow_error *error)
1715 {
1716         if (pattern == NULL)
1717                 return rte_flow_error_set(error, EINVAL,
1718                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1719                                           NULL, "NULL pattern.");
1720
1721         if (actions == NULL)
1722                 return rte_flow_error_set(error, EINVAL,
1723                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1724                                           NULL, "NULL action.");
1725
1726         if (attr == NULL)
1727                 return rte_flow_error_set(error, EINVAL,
1728                                           RTE_FLOW_ERROR_TYPE_ATTR,
1729                                           NULL, "NULL attribute.");
1730
1731         return hns3_check_attr(attr, error);
1732 }
1733
1734 /*
1735  * Check whether the flow rule is supported by hns3.
1736  * Only the format is checked; there is no guarantee that the rule can be
1737  * programmed into the HW, since there may not be enough room for it.
1738  */
1739 static int
1740 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1741                    const struct rte_flow_item pattern[],
1742                    const struct rte_flow_action actions[],
1743                    struct rte_flow_error *error)
1744 {
1745         struct hns3_fdir_rule fdir_rule;
1746         int ret;
1747
1748         ret = hns3_flow_args_check(attr, pattern, actions, error);
1749         if (ret)
1750                 return ret;
1751
1752         if (hns3_find_rss_general_action(pattern, actions))
1753                 return hns3_parse_rss_filter(dev, actions, error);
1754
1755         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1756         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1757 }
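
/*
 * Illustrative sketch (editor's note, not upstream code): this callback is
 * reached through the generic rte_flow API; port_id, pattern[] and actions[]
 * are assumed to be set up by the application.
 *
 *     struct rte_flow_error err;
 *     const struct rte_flow_attr attr = { .ingress = 1 };
 *
 *     int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *     if (rc != 0)
 *             printf("rule rejected: %s\n", err.message ? err.message : "?");
 */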
1758
1759 static int
1760 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
1761                           const struct rte_flow_action *act,
1762                           struct rte_flow *flow)
1763 {
1764         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1765         struct hns3_rss_conf_ele *rss_filter_ptr;
1766         const struct hns3_rss_conf *rss_conf;
1767         int ret;
1768
1769         rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1770                                      sizeof(struct hns3_rss_conf_ele), 0);
1771         if (rss_filter_ptr == NULL) {
1772                 hns3_err(hw, "failed to allocate hns3_rss_filter memory");
1773                 return -ENOMEM;
1774         }
1775
1776         /*
1777          * Configure the rule in hardware only after all the preceding steps
1778          * have succeeded; this simplifies rolling back rules already in the
1779          * hardware.
1780          */
1781         rss_conf = (const struct hns3_rss_conf *)act->conf;
1782         ret = hns3_flow_parse_rss(dev, rss_conf, true);
1783         if (ret != 0) {
1784                 rte_free(rss_filter_ptr);
1785                 return ret;
1786         }
1787
1788         hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf);
1789         rss_filter_ptr->filter_info.valid = true;
1790         TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1791         flow->rule = rss_filter_ptr;
1792         flow->filter_type = RTE_ETH_FILTER_HASH;
1793
1794         return 0;
1795 }
1796
1797 static int
1798 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
1799                            const struct rte_flow_item pattern[],
1800                            const struct rte_flow_action actions[],
1801                            struct rte_flow_error *error,
1802                            struct rte_flow *flow)
1803 {
1804         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1805         struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1806         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1807         struct hns3_fdir_rule fdir_rule;
1808         bool indir;
1809         int ret;
1810
1811         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1812         ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1813         if (ret != 0)
1814                 return ret;
1815
1816         indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR);
1817         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1818                 ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id,
1819                                        error);
1820                 if (ret != 0)
1821                         return ret;
1822
1823                 flow->counter_id = fdir_rule.act_cnt.id;
1824         }
1825
1826         fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1827                                     sizeof(struct hns3_fdir_rule_ele), 0);
1828         if (fdir_rule_ptr == NULL) {
1829                 hns3_err(hw, "failed to allocate fdir_rule memory.");
1830                 ret = -ENOMEM;
1831                 goto err_malloc;
1832         }
1833
1834         /*
1835          * Configure the rule in hardware only after all the preceding steps
1836          * have succeeded; this simplifies rolling back rules already in the
1837          * hardware.
1838          */
1839         ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1840         if (ret != 0)
1841                 goto err_fdir_filter;
1842
1843         memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1844                 sizeof(struct hns3_fdir_rule));
1845         TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1846         flow->rule = fdir_rule_ptr;
1847         flow->filter_type = RTE_ETH_FILTER_FDIR;
1848
1849         return 0;
1850
1851 err_fdir_filter:
1852         rte_free(fdir_rule_ptr);
1853 err_malloc:
1854         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1855                 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1856
1857         return ret;
1858 }
1859
1860 /*
1861  * Create or destroy a flow rule.
1862  * Theoretically one rule can match more than one filter.
1863  * We let it use the first filter it hits,
1864  * so the sequence matters.
1865  */
1866 static struct rte_flow *
1867 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1868                  const struct rte_flow_item pattern[],
1869                  const struct rte_flow_action actions[],
1870                  struct rte_flow_error *error)
1871 {
1872         struct hns3_adapter *hns = dev->data->dev_private;
1873         struct hns3_hw *hw = &hns->hw;
1874         struct hns3_flow_mem *flow_node;
1875         const struct rte_flow_action *act;
1876         struct rte_flow *flow;
1877         int ret;
1878
1879         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1880         if (ret)
1881                 return NULL;
1882
1883         flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1884         if (flow == NULL) {
1885                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1886                                    NULL, "Failed to allocate flow memory");
1887                 return NULL;
1888         }
1889         flow_node = rte_zmalloc("hns3 flow node",
1890                                 sizeof(struct hns3_flow_mem), 0);
1891         if (flow_node == NULL) {
1892                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1893                                    NULL, "Failed to allocate flow list memory");
1894                 rte_free(flow);
1895                 return NULL;
1896         }
1897
1898         flow_node->flow = flow;
1899         TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1900
1901         act = hns3_find_rss_general_action(pattern, actions);
1902         if (act)
1903                 ret = hns3_flow_create_rss_rule(dev, act, flow);
1904         else
1905                 ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
1906                                                  error, flow);
1907         if (ret == 0)
1908                 return flow;
1909
1910         rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1911                            "Failed to create flow");
1912         TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1913         rte_free(flow_node);
1914         rte_free(flow);
1915
1916         return NULL;
1917 }
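
/*
 * Illustrative sketch (editor's note, not upstream code): a minimal FDIR-style
 * rule as an application would create it through the generic API; port_id and
 * the queue index are assumptions.
 *
 *     const struct rte_flow_attr attr = { .ingress = 1 };
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action_queue queue = { .index = 1 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                          actions, &err);
 */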
1918
1919 /* Destroy a flow rule on hns3. */
1920 static int
1921 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1922                   struct rte_flow_error *error)
1923 {
1924         struct hns3_adapter *hns = dev->data->dev_private;
1925         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1926         struct hns3_rss_conf_ele *rss_filter_ptr;
1927         struct hns3_flow_mem *flow_node;
1928         enum rte_filter_type filter_type;
1929         struct hns3_fdir_rule fdir_rule;
1930         struct hns3_hw *hw = &hns->hw;
1931         int ret;
1932
1933         if (flow == NULL)
1934                 return rte_flow_error_set(error, EINVAL,
1935                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1936                                           flow, "Flow is NULL");
1937
1938         filter_type = flow->filter_type;
1939         switch (filter_type) {
1940         case RTE_ETH_FILTER_FDIR:
1941                 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1942                 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1943                            sizeof(struct hns3_fdir_rule));
1944
1945                 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1946                 if (ret)
1947                         return rte_flow_error_set(error, EIO,
1948                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1949                                                   flow,
1950                                                   "Destroy FDIR fail. Try again");
1951                 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1952                         hns3_counter_release(dev, fdir_rule.act_cnt.id);
1953                 TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1954                 rte_free(fdir_rule_ptr);
1955                 fdir_rule_ptr = NULL;
1956                 break;
1957         case RTE_ETH_FILTER_HASH:
1958                 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1959                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1960                                              false);
1961                 if (ret)
1962                         return rte_flow_error_set(error, EIO,
1963                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1964                                                   flow,
1965                                                   "Destroy RSS fail. Try again");
1966                 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1967                 rte_free(rss_filter_ptr);
1968                 rss_filter_ptr = NULL;
1969                 break;
1970         default:
1971                 return rte_flow_error_set(error, EINVAL,
1972                                           RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1973                                           "Unsupported filter type");
1974         }
1975
1976         TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1977                 if (flow_node->flow == flow) {
1978                         TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1979                         rte_free(flow_node);
1980                         flow_node = NULL;
1981                         break;
1982                 }
1983         }
1984         rte_free(flow);
1985
1986         return 0;
1987 }
1988
1989 /* Destroy all flow rules associated with a port on hns3. */
1990 static int
1991 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1992 {
1993         struct hns3_adapter *hns = dev->data->dev_private;
1994         int ret;
1995
1996         /* FDIR is available only in PF driver */
1997         if (!hns->is_vf) {
1998                 ret = hns3_clear_all_fdir_filter(hns);
1999                 if (ret) {
2000                         rte_flow_error_set(error, ret,
2001                                            RTE_FLOW_ERROR_TYPE_HANDLE,
2002                                            NULL, "Failed to flush rule");
2003                         return ret;
2004                 }
2005                 hns3_counter_flush(dev);
2006         }
2007
2008         ret = hns3_clear_rss_filter(dev);
2009         if (ret) {
2010                 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2011                                    NULL, "Failed to flush rss filter");
2012                 return ret;
2013         }
2014
2015         hns3_filterlist_flush(dev);
2016
2017         return 0;
2018 }
2019
2020 /* Query an existing flow rule. */
2021 static int
2022 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2023                 const struct rte_flow_action *actions, void *data,
2024                 struct rte_flow_error *error)
2025 {
2026         struct rte_flow_action_rss *rss_conf;
2027         struct hns3_rss_conf_ele *rss_rule;
2028         struct rte_flow_query_count *qc;
2029         int ret;
2030
2031         if (!flow->rule)
2032                 return rte_flow_error_set(error, EINVAL,
2033                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2034
2035         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2036                 switch (actions->type) {
2037                 case RTE_FLOW_ACTION_TYPE_VOID:
2038                         break;
2039                 case RTE_FLOW_ACTION_TYPE_COUNT:
2040                         qc = (struct rte_flow_query_count *)data;
2041                         ret = hns3_counter_query(dev, flow, qc, error);
2042                         if (ret)
2043                                 return ret;
2044                         break;
2045                 case RTE_FLOW_ACTION_TYPE_RSS:
2046                         if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2047                                 return rte_flow_error_set(error, ENOTSUP,
2048                                         RTE_FLOW_ERROR_TYPE_ACTION,
2049                                         actions, "action is not supported");
2050                         }
2051                         rss_conf = (struct rte_flow_action_rss *)data;
2052                         rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2053                         rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2054                                    sizeof(struct rte_flow_action_rss));
2055                         break;
2056                 default:
2057                         return rte_flow_error_set(error, ENOTSUP,
2058                                 RTE_FLOW_ERROR_TYPE_ACTION,
2059                                 actions, "action is not supported");
2060                 }
2061         }
2062
2063         return 0;
2064 }
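
/*
 * Illustrative sketch (editor's note, not upstream code): querying the COUNT
 * action of a rule through the generic API; port_id and flow are assumptions.
 *
 *     struct rte_flow_query_count cnt = { .reset = 0 };
 *     const struct rte_flow_action count_act[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_query(port_id, flow, count_act, &cnt, &err) == 0 &&
 *         cnt.hits_set)
 *             printf("hits: %" PRIu64 "\n", cnt.hits);
 */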
2065
2066 static int
2067 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2068                         const struct rte_flow_attr *attr,
2069                         const struct rte_flow_item pattern[],
2070                         const struct rte_flow_action actions[],
2071                         struct rte_flow_error *error)
2072 {
2073         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2074         int ret;
2075
2076         pthread_mutex_lock(&hw->flows_lock);
2077         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
2078         pthread_mutex_unlock(&hw->flows_lock);
2079
2080         return ret;
2081 }
2082
2083 static struct rte_flow *
2084 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2085                       const struct rte_flow_item pattern[],
2086                       const struct rte_flow_action actions[],
2087                       struct rte_flow_error *error)
2088 {
2089         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2090         struct rte_flow *flow;
2091
2092         pthread_mutex_lock(&hw->flows_lock);
2093         flow = hns3_flow_create(dev, attr, pattern, actions, error);
2094         pthread_mutex_unlock(&hw->flows_lock);
2095
2096         return flow;
2097 }
2098
2099 static int
2100 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2101                        struct rte_flow_error *error)
2102 {
2103         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2104         int ret;
2105
2106         pthread_mutex_lock(&hw->flows_lock);
2107         ret = hns3_flow_destroy(dev, flow, error);
2108         pthread_mutex_unlock(&hw->flows_lock);
2109
2110         return ret;
2111 }
2112
2113 static int
2114 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2115 {
2116         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2117         int ret;
2118
2119         pthread_mutex_lock(&hw->flows_lock);
2120         ret = hns3_flow_flush(dev, error);
2121         pthread_mutex_unlock(&hw->flows_lock);
2122
2123         return ret;
2124 }
2125
2126 static int
2127 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2128                      const struct rte_flow_action *actions, void *data,
2129                      struct rte_flow_error *error)
2130 {
2131         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2132         int ret;
2133
2134         pthread_mutex_lock(&hw->flows_lock);
2135         ret = hns3_flow_query(dev, flow, actions, data, error);
2136         pthread_mutex_unlock(&hw->flows_lock);
2137
2138         return ret;
2139 }
2140
2141 static int
2142 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2143                         const struct rte_flow_action *action,
2144                         struct rte_flow_error *error)
2145 {
2146         if (!conf->ingress)
2147                 return rte_flow_error_set(error, EINVAL,
2148                                 RTE_FLOW_ERROR_TYPE_ACTION,
2149                                 NULL, "Indir action must be ingress");
2150
2151         if (conf->egress)
2152                 return rte_flow_error_set(error, EINVAL,
2153                                 RTE_FLOW_ERROR_TYPE_ACTION,
2154                                 NULL, "Indir action does not support egress");
2155
2156         if (conf->transfer)
2157                 return rte_flow_error_set(error, EINVAL,
2158                                 RTE_FLOW_ERROR_TYPE_ACTION,
2159                                 NULL, "Indir action does not support transfer");
2160
2161         if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2162                 return rte_flow_error_set(error, EINVAL,
2163                                 RTE_FLOW_ERROR_TYPE_ACTION,
2164                                 NULL, "Indir action only supports count");
2165
2166         return 0;
2167 }
2168
2169 static struct rte_flow_action_handle *
2170 hns3_flow_action_create(struct rte_eth_dev *dev,
2171                         const struct rte_flow_indir_action_conf *conf,
2172                         const struct rte_flow_action *action,
2173                         struct rte_flow_error *error)
2174 {
2175         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2176         struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2177         const struct rte_flow_action_count *act_count;
2178         struct rte_flow_action_handle *handle = NULL;
2179         struct hns3_flow_counter *counter;
2180
2181         if (hns3_check_indir_action(conf, action, error))
2182                 return NULL;
2183
2184         handle = rte_zmalloc("hns3 action handle",
2185                              sizeof(struct rte_flow_action_handle), 0);
2186         if (handle == NULL) {
2187                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2188                                    NULL, "Failed to allocate action memory");
2189                 return NULL;
2190         }
2191
2192         pthread_mutex_lock(&hw->flows_lock);
2193
2194         act_count = (const struct rte_flow_action_count *)action->conf;
2195         if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2196                 rte_flow_error_set(error, EINVAL,
2197                                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2198                                    action, "Invalid counter id");
2199                 goto err_exit;
2200         }
2201
2202         if (hns3_counter_new(dev, false, act_count->id, error))
2203                 goto err_exit;
2204
2205         counter = hns3_counter_lookup(dev, act_count->id);
2206         if (counter == NULL) {
2207                 rte_flow_error_set(error, EINVAL,
2208                                    RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2209                                    action, "Counter id not found");
2210                 goto err_exit;
2211         }
2212
2213         counter->indirect = true;
2214         handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2215         handle->counter_id = counter->id;
2216
2217         pthread_mutex_unlock(&hw->flows_lock);
2218         return handle;
2219
2220 err_exit:
2221         pthread_mutex_unlock(&hw->flows_lock);
2222         rte_free(handle);
2223         return NULL;
2224 }
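
/*
 * Illustrative sketch (editor's note, not upstream code): creating an indirect
 * counter through the generic API, matching the checks above (ingress-only,
 * COUNT-only); port_id and the counter id are assumptions.
 *
 *     const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *     const struct rte_flow_action_count cnt = { .id = 0 };
 *     const struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *             .conf = &cnt,
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow_action_handle *h =
 *             rte_flow_action_handle_create(port_id, &conf, &act, &err);
 */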
2225
2226 static int
2227 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2228                          struct rte_flow_action_handle *handle,
2229                          struct rte_flow_error *error)
2230 {
2231         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2232         struct hns3_flow_counter *counter;
2233
2234         pthread_mutex_lock(&hw->flows_lock);
2235
2236         if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2237                 pthread_mutex_unlock(&hw->flows_lock);
2238                 return rte_flow_error_set(error, EINVAL,
2239                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2240                                         handle, "Invalid indirect type");
2241         }
2242
2243         counter = hns3_counter_lookup(dev, handle->counter_id);
2244         if (counter == NULL) {
2245                 pthread_mutex_unlock(&hw->flows_lock);
2246                 return rte_flow_error_set(error, EINVAL,
2247                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2248                                 handle, "Counter id does not exist");
2249         }
2250
2251         if (counter->ref_cnt > 1) {
2252                 pthread_mutex_unlock(&hw->flows_lock);
2253                 return rte_flow_error_set(error, EBUSY,
2254                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2255                                 handle, "Counter id in use");
2256         }
2257
2258         (void)hns3_counter_release(dev, handle->counter_id);
2259         rte_free(handle);
2260
2261         pthread_mutex_unlock(&hw->flows_lock);
2262         return 0;
2263 }
2264
2265 static int
2266 hns3_flow_action_query(struct rte_eth_dev *dev,
2267                        const struct rte_flow_action_handle *handle,
2268                        void *data,
2269                        struct rte_flow_error *error)
2270 {
2271         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2272         struct rte_flow flow;
2273         int ret;
2274
2275         pthread_mutex_lock(&hw->flows_lock);
2276
2277         if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2278                 pthread_mutex_unlock(&hw->flows_lock);
2279                 return rte_flow_error_set(error, EINVAL,
2280                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2281                                         handle, "Invalid indirect type");
2282         }
2283
2284         memset(&flow, 0, sizeof(flow));
2285         flow.counter_id = handle->counter_id;
2286         ret = hns3_counter_query(dev, &flow,
2287                                  (struct rte_flow_query_count *)data, error);
2288         pthread_mutex_unlock(&hw->flows_lock);
2289         return ret;
2290 }
2291
2292 static const struct rte_flow_ops hns3_flow_ops = {
2293         .validate = hns3_flow_validate_wrap,
2294         .create = hns3_flow_create_wrap,
2295         .destroy = hns3_flow_destroy_wrap,
2296         .flush = hns3_flow_flush_wrap,
2297         .query = hns3_flow_query_wrap,
2298         .isolate = NULL,
2299         .action_handle_create = hns3_flow_action_create,
2300         .action_handle_destroy = hns3_flow_action_destroy,
2301         .action_handle_query = hns3_flow_action_query,
2302 };
2303
2304 int
2305 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2306                       const struct rte_flow_ops **ops)
2307 {
2308         struct hns3_hw *hw;
2309
2310         hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2311         if (hw->adapter_state >= HNS3_NIC_CLOSED)
2312                 return -ENODEV;
2313
2314         *ops = &hns3_flow_ops;
2315         return 0;
2316 }
2317
2318 void
2319 hns3_flow_init(struct rte_eth_dev *dev)
2320 {
2321         struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2322         pthread_mutexattr_t attr;
2323
2324         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2325                 return;
2326
2327         pthread_mutexattr_init(&attr);
2328         pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2329         pthread_mutex_init(&hw->flows_lock, &attr);
2330         dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2331
2332         TAILQ_INIT(&hw->flow_fdir_list);
2333         TAILQ_INIT(&hw->flow_rss_list);
2334         TAILQ_INIT(&hw->flow_list);
2335 }
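
/*
 * Editor's note: the mutex is created PTHREAD_PROCESS_SHARED because hw lives
 * in memory shared between primary and secondary processes, and setting
 * RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE tells the ethdev layer not to take its own
 * per-port lock around the rte_flow ops, since the wrappers above already
 * serialize on hw->flows_lock.
 */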
2336
2337 void
2338 hns3_flow_uninit(struct rte_eth_dev *dev)
2339 {
2340         struct rte_flow_error error;

2341         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2342                 hns3_flow_flush_wrap(dev, &error);
2343 }