/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_flow.h"

/* Default hash key */
static uint8_t hns3_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special Filter id for non-specific packet flagging. Don't change value */
#define HNS3_MAX_FILTER_ID	0x0FFF

#define ETHER_TYPE_MASK		0xFFFF
#define IPPROTO_MASK		0xFF
#define TUNNEL_TYPE_MASK	0xFFFF

#define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE		0x6558

static enum rte_flow_item_type first_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type L2_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type tunnel_next_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN
};

struct items_step_mngr {
	enum rte_flow_item_type *items;
	int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = rte_be_to_cpu_32(src[i]);
}

/*
 * This function is used to find the RSS general action.
 * 1. As we know, RSS is used to spread packets among several queues. The flow
 *    API provides struct rte_flow_action_rss; the user can configure its
 *    fields, such as func/level/types/key/queue, to control the RSS function.
 * 2. The flow API also supports queue region configuration for hns3. It is
 *    implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
 *    rule whose action is an RSS queue region.
 * 3. When the action is RSS, the following rule distinguishes the two cases:
 *    Case 1: the pattern has ETH and the action's queue_num > 0, indicating
 *            a queue region configuration.
 *    Otherwise: a general RSS action.
 */
static const struct rte_flow_action *
hns3_find_rss_general_action(const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[])
{
	const struct rte_flow_action *act = NULL;
	const struct hns3_rss_conf *rss;
	bool have_eth = false;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			act = actions;
			break;
		}
	}
	if (!act)
		return NULL;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
			have_eth = true;
			break;
		}
	}

	rss = act->conf;
	if (have_eth && rss->conf.queue_num) {
		/*
		 * The pattern has ETH and the action's queue_num > 0,
		 * indicating a queue region configuration. Because queue
		 * region is implemented by FDIR + RSS in hns3 hardware, it
		 * must go through the FDIR process, so return NULL here to
		 * avoid entering the RSS process.
		 */
		return NULL;
	}

	return act;
}
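
/*
 * Illustrative sketch (not part of the driver): how an application-side
 * rule falls into the two cases above. The names "queues", "pattern" and
 * "rss_conf" are hypothetical.
 *
 *	uint16_t queues[4] = { 0, 1, 2, 3 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	// pattern has ETH
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.types = RTE_ETH_RSS_IP,
 *		.queue_num = 4,	// > 0 with ETH in pattern: queue region (FDIR)
 *		.queue = queues,	// queue_num == 0: general RSS action
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */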

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	LIST_FOREACH(cnt, &pf->flow_counters, next) {
		if (cnt->id == id)
			return cnt;
	}
	return NULL;
}

static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
		 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_flow_counter *cnt;
	uint64_t value;
	int ret;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt) {
		if (!cnt->indirect || cnt->indirect != indirect)
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				cnt,
				"Counter id is in use, indirect flag does not match");
		/* Clear the indirect counter on first use. */
		if (cnt->indirect && cnt->ref_cnt == 1)
			(void)hns3_get_count(hw, id, &value);
		cnt->ref_cnt++;
		return 0;
	}

	/* Clear the counter by reading it, because the counter is read-to-clear */
	ret = hns3_get_count(hw, id, &value);
	if (ret)
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Clear counter failed!");

	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
	if (cnt == NULL)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
					  "Alloc mem for counter failed");
	cnt->id = id;
	cnt->indirect = indirect;
	cnt->ref_cnt = 1;
	cnt->hits = 0;
	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
	return 0;
}
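
/*
 * Illustrative sketch (not part of the driver): creating a shared (indirect)
 * COUNT action, which reaches this function with "indirect" set. "port_id"
 * and the chosen counter id are assumptions.
 *
 *	struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
 *	struct rte_flow_action_count count_conf = { .id = 0 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count_conf,
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *handle;
 *
 *	handle = rte_flow_action_handle_create(port_id, &indir_conf,
 *					       &count_action, &err);
 *	// "handle" is then referenced from rules via
 *	// RTE_FLOW_ACTION_TYPE_INDIRECT with .conf = handle.
 */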

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_query_count *qc,
		   struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_flow_counter *cnt;
	uint64_t value;
	int ret;

	/* FDIR is available only in the PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir is not supported in VF");
	cnt = hns3_counter_lookup(dev, flow->counter_id);
	if (cnt == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Can't find counter id");

	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Read counter failed.");
		return ret;
	}
	qc->hits_set = 1;
	qc->hits = value;
	qc->bytes_set = 0;
	qc->bytes = 0;

	return 0;
}
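
/*
 * Illustrative sketch (not part of the driver): querying a flow's hit
 * counter from the application. Note this driver reports hits only, so
 * bytes_set stays 0. "port_id" and "flow" are assumptions.
 *
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_query_count qc = { 0 };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0 &&
 *	    qc.hits_set)
 *		printf("flow hits: %" PRIu64 "\n", qc.hits);
 */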

static int
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt == NULL) {
		hns3_err(hw, "Can't find available counter to release");
		return -EINVAL;
	}
	cnt->ref_cnt--;
	if (cnt->ref_cnt == 0) {
		LIST_REMOVE(cnt, next);
		rte_free(cnt);
	}
	return 0;
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	LIST_HEAD(counters, hns3_flow_counter) indir_counters;
	struct hns3_flow_counter *cnt_ptr;

	LIST_INIT(&indir_counters);
	cnt_ptr = LIST_FIRST(&pf->flow_counters);
	while (cnt_ptr) {
		LIST_REMOVE(cnt_ptr, next);
		if (cnt_ptr->indirect)
			LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
		else
			rte_free(cnt_ptr);
		cnt_ptr = LIST_FIRST(&pf->flow_counters);
	}

	/* Reset the indirect action and add to pf->flow_counters list. */
	cnt_ptr = LIST_FIRST(&indir_counters);
	while (cnt_ptr) {
		LIST_REMOVE(cnt_ptr, next);
		cnt_ptr->ref_cnt = 1;
		cnt_ptr->hits = 0;
		LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
		cnt_ptr = LIST_FIRST(&indir_counters);
	}
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct hns3_fdir_rule *rule,
			 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_queue *queue;
	struct hns3_hw *hw = &hns->hw;

	queue = (const struct rte_flow_action_queue *)action->conf;
	if (queue->index >= hw->data->nb_rx_queues) {
		hns3_err(hw, "queue ID(%u) is greater than number of "
			  "available queues (%u) in driver.",
			  queue->index, hw->data->nb_rx_queues);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  action, "Invalid queue ID in PF");
	}

	rule->queue_id = queue->index;
	rule->nb_queues = 1;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}
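
/*
 * Illustrative sketch (not part of the driver): steering matched packets to
 * one Rx queue. The index must be below the number of configured Rx queues,
 * as checked above.
 *
 *	struct rte_flow_action_queue queue_conf = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */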

static int
hns3_handle_action_queue_region(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct hns3_fdir_rule *rule,
				struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_rss *conf = action->conf;
	struct hns3_hw *hw = &hns->hw;
	uint16_t idx;

	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Queue region configuration is not supported!");

	if ((!rte_is_power_of_2(conf->queue_num)) ||
		conf->queue_num > hw->rss_size_max ||
		conf->queue[0] >= hw->data->nb_rx_queues ||
		conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
			"Invalid start queue ID or queue num! The start queue "
			"ID must be valid, and the queue num must be a power "
			"of 2 and <= rss_size_max.");
	}

	for (idx = 1; idx < conf->queue_num; idx++) {
		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
				"Invalid queue ID sequence! Queue IDs must "
				"increase continuously.");
	}

	rule->queue_id = conf->queue[0];
	rule->nb_queues = conf->queue_num;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}
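
/*
 * Illustrative sketch (not part of the driver): a queue array that satisfies
 * the checks above, i.e. a power-of-2 length, consecutive IDs, and within
 * both nb_rx_queues and rss_size_max. "region" is a hypothetical name.
 *
 *	uint16_t region[8] = { 8, 9, 10, 11, 12, 13, 14, 15 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.queue_num = RTE_DIM(region),	// power of 2
 *		.queue = region,		// region[i] == region[i - 1] + 1
 *	};
 */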

static int
hns3_handle_action_indirect(struct rte_eth_dev *dev,
			    const struct rte_flow_action *action,
			    struct hns3_fdir_rule *rule,
			    struct rte_flow_error *error)
{
	const struct rte_flow_action_handle *indir = action->conf;

	if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				action, "Invalid indirect type");

	if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				action, "Counter id does not exist");

	rule->act_cnt.id = indir->counter_id;
	rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);

	return 0;
}

/*
 * Parse the action list and convert it into NIC-specific FDIR configuration.
 * Each action is validated as it is handled.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
		    const struct rte_flow_action actions[],
		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *mark;
	struct hns3_pf *pf = &hns->pf;
	uint32_t counter_num;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = hns3_handle_action_queue(dev, actions, rule,
						       error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			rule->action = HNS3_FD_ACTION_DROP_PACKET;
			break;
		/*
		 * Here the RSS action really means a queue region.
		 * Queue region is implemented by FDIR + RSS in hns3 hardware:
		 * the FDIR action selects one queue region (start_queue_id
		 * and queue_num), then RSS spreads packets over that region
		 * by the RSS algorithm.
		 */
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = hns3_handle_action_queue_region(dev, actions,
							      rule, error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark =
			    (const struct rte_flow_action_mark *)actions->conf;
			if (mark->id >= HNS3_MAX_FILTER_ID)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
						actions,
						"Invalid Mark ID");
			rule->fd_id = mark->id;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			rule->fd_id = HNS3_MAX_FILTER_ID;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
			    (const struct rte_flow_action_count *)actions->conf;
			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
			if (act_count->id >= counter_num)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
						actions,
						"Invalid counter id");
			rule->act_cnt = *act_count;
			rule->flags |= HNS3_RULE_FLAG_COUNTER;
			rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
			break;
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			ret = hns3_handle_action_indirect(dev, actions, rule,
							  error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "Unsupported action");
		}
	}

	return 0;
}
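
/*
 * Illustrative sketch (not part of the driver): attaching a MARK action and
 * reading the mark back from received mbufs. The mbuf flag/field names
 * follow the current mbuf API; "m" and "handle_marked_packet" are
 * hypothetical.
 *
 *	struct rte_flow_action_mark mark_conf = {
 *		.id = 0x123,	// must be below HNS3_MAX_FILTER_ID
 *	};
 *	struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	// On the Rx path:
 *	if (m->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
 *		handle_marked_packet(m->hash.fdir.hi);	// == 0x123
 */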

static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
	if (!attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  attr, "Ingress can't be zero");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "Egress is not supported");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  attr, "Transfer is not supported");
	if (attr->priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  attr, "Priority is not supported");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  attr, "Group is not supported");
	return 0;
}
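
/*
 * Illustrative sketch (not part of the driver): the only attribute layout
 * that passes the checks above is ingress set and everything else zero.
 *
 *	struct rte_flow_attr attr = {
 *		.ingress = 1,
 *		// group, priority, egress and transfer must stay 0
 *	};
 */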

static int
hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error __rte_unused)
{
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		eth_mask = item->mask;
		if (eth_mask->type) {
			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
			rule->key_conf.mask.ether_type =
			    rte_be_to_cpu_16(eth_mask->type);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
			memcpy(rule->key_conf.mask.src_mac,
			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
			memcpy(rule->key_conf.mask.dst_mac,
			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
	}

	eth_spec = item->spec;
	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	return 0;
}

static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;

	rule->key_conf.vlan_num++;
	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Vlan_num is more than 2");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		vlan_mask = item->mask;
		if (vlan_mask->tci) {
			if (rule->key_conf.vlan_num == 1) {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
					     1);
				rule->key_conf.mask.vlan_tag1 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			} else {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
					     1);
				rule->key_conf.mask.vlan_tag2 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			}
		}
	}

	vlan_spec = item->spec;
	if (rule->key_conf.vlan_num == 1)
		rule->key_conf.spec.vlan_tag1 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	else
		rule->key_conf.spec.vlan_tag2 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	return 0;
}

static bool
hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
{
	if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum)
		return false;

	return true;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv4_mask = item->mask;
		if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst ip, tos, proto in IPV4");
		}

		if (ipv4_mask->hdr.src_addr) {
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
		}

		if (ipv4_mask->hdr.dst_addr) {
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
		}

		if (ipv4_mask->hdr.type_of_service) {
			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
			rule->key_conf.mask.ip_tos =
			    ipv4_mask->hdr.type_of_service;
		}

		if (ipv4_mask->hdr.next_proto_id) {
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
			rule->key_conf.mask.ip_proto =
			    ipv4_mask->hdr.next_proto_id;
		}
	}

	ipv4_spec = item->spec;
	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
	return 0;
}
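
/*
 * Illustrative sketch (not part of the driver): an IPv4 item restricted to
 * the fields the parser above accepts (src/dst address, TOS, proto).
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = {
 *			.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 1)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = {
 *			.src_addr = RTE_BE32(UINT32_MAX),
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &ipv4_spec,
 *		.mask = &ipv4_mask,
 *	};
 */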

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.hop_limits) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst ip, proto in IPV6");
		}
		net_addr_to_host(rule->key_conf.mask.src_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
				 IP_ADDR_LEN);
		net_addr_to_host(rule->key_conf.mask.dst_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
				 IP_ADDR_LEN);
		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
		if (ipv6_mask->hdr.proto)
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	}

	ipv6_spec = item->spec;
	net_addr_to_host(rule->key_conf.spec.src_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
			 IP_ADDR_LEN);
	net_addr_to_host(rule->key_conf.spec.dst_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
			 IP_ADDR_LEN);
	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

	return 0;
}

static bool
hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
{
	if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
	    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp)
		return false;

	return true;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		tcp_mask = item->mask;
		if (!hns3_check_tcp_mask_supported(tcp_mask)) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst port in TCP");
		}

		if (tcp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
		}
		if (tcp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
		}
	}

	tcp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst port in UDP");
		}
		if (udp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
		}
		if (udp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
		}
	}

	udp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		sctp_mask = item->mask;
		if (sctp_mask->hdr.cksum)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						  item,
						  "Only support src & dst port in SCTP");
		if (sctp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
		}
		if (sctp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
		}
		if (sctp_mask->hdr.tag) {
			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
			rule->key_conf.mask.sctp_tag =
			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
		}
	}

	sctp_spec = item->spec;
	rule->key_conf.spec.src_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

	return 0;
}

/*
 * Check items before tunnel, save inner configs to outer configs, and clear
 * inner configs.
 * The key consists of two parts: meta_data and tuple keys.
 * Meta data uses 15 bits, including vlan_num (2 bits), dst_port (12 bits)
 * and tunnel packet (1 bit).
 * Tuple keys use 384 bits, including ot_dst-mac (48 bits), ot_dst-port
 * (16 bits), ot_tun_vni (24 bits), ot_flow_id (8 bits), src-mac (48 bits),
 * dst-mac (48 bits), src-ip (32/128 bits), dst-ip (32/128 bits), src-port
 * (16 bits), dst-port (16 bits), tos (8 bits), ether-proto (16 bits),
 * ip-proto (8 bits), vlantag1 (16 bits), vlantag2 (16 bits) and sctp-tag
 * (32 bits).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	/* check eth config */
	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer eth mac is unsupported");
	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
		rule->key_conf.spec.outer_ether_type =
		    rule->key_conf.spec.ether_type;
		rule->key_conf.mask.outer_ether_type =
		    rule->key_conf.mask.ether_type;
		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
		rule->key_conf.spec.ether_type = 0;
		rule->key_conf.mask.ether_type = 0;
	}

	/* check vlan config */
	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Outer vlan tags are unsupported");

	/* clear vlan_num for inner vlan select */
	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
	rule->key_conf.vlan_num = 0;

	/* check L3 config */
	if (rule->input_set &
	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer ip is unsupported");
	if (rule->input_set & BIT(INNER_IP_PROTO)) {
		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
		rule->key_conf.spec.ip_proto = 0;
		rule->key_conf.mask.ip_proto = 0;
	}

	/* check L4 config */
	if (rule->input_set & BIT(INNER_SCTP_TAG))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Outer sctp tag is unsupported");

	if (rule->input_set & BIT(INNER_SRC_PORT)) {
		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
		rule->key_conf.spec.outer_src_port =
		    rule->key_conf.spec.src_port;
		rule->key_conf.mask.outer_src_port =
		    rule->key_conf.mask.src_port;
		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
		rule->key_conf.spec.src_port = 0;
		rule->key_conf.mask.src_port = 0;
	}
	if (rule->input_set & BIT(INNER_DST_PORT)) {
		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
		rule->key_conf.spec.dst_port = 0;
		rule->key_conf.mask.dst_port = 0;
	}
	return 0;
}

static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
	else
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	vxlan_mask = item->mask;
	vxlan_spec = item->spec;

	if (vxlan_mask->flags)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "Flags are not supported in VxLAN");

	/* VNI must be totally masked or not. */
	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "VNI must be totally masked or not in VxLAN");
	if (vxlan_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
		       VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
	       VNI_OR_TNI_LEN);
	return 0;
}
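
/*
 * Illustrative sketch (not part of the driver): matching one VXLAN VNI. The
 * 24-bit VNI mask must be all-ones (match this VNI) or all-zero (any VNI),
 * as checked above.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x12, 0x34 },	// VNI 0x001234
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },	// totally masked
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &vxlan_spec,
 *		.mask = &vxlan_mask,
 *	};
 */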

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;

	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
	rule->key_conf.mask.outer_proto = IPPROTO_MASK;

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	nvgre_mask = item->mask;
	nvgre_spec = item->spec;

	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "Ver/protocol is not supported in NVGRE");

	/* TNI must be totally masked or not. */
	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "TNI must be totally masked or not in NVGRE");

	if (nvgre_mask->tni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
		       VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
	       VNI_OR_TNI_LEN);

	if (nvgre_mask->flow_id) {
		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
	}
	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
	return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	const struct rte_flow_item_geneve *geneve_spec;
	const struct rte_flow_item_geneve *geneve_mask;

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	geneve_mask = item->mask;
	geneve_spec = item->spec;

	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "Ver/protocol is not supported in GENEVE");
	/* VNI must be totally masked or not. */
	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
					  "VNI must be totally masked or not in GENEVE");
	if (geneve_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
		       VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
	       VNI_OR_TNI_LEN);
	return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	int ret;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask "
					  "but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure "
					  "with mask");

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		ret = hns3_parse_vxlan(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		ret = hns3_parse_nvgre(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		ret = hns3_parse_geneve(item, rule, error);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "Unsupported tunnel type!");
	}
	if (ret)
		return ret;
	return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct items_step_mngr *step_mngr,
		  struct rte_flow_error *error)
{
	int ret;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask "
					  "but without spec");

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		ret = hns3_parse_eth(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = RTE_DIM(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		ret = hns3_parse_vlan(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = RTE_DIM(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ret = hns3_parse_ipv4(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = RTE_DIM(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ret = hns3_parse_ipv6(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = RTE_DIM(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		ret = hns3_parse_tcp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = RTE_DIM(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		ret = hns3_parse_udp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = RTE_DIM(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		ret = hns3_parse_sctp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = RTE_DIM(L4_next_items);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "Unsupported normal type!");
	}

	return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
		   struct items_step_mngr step_mngr,
		   struct rte_flow_error *error)
{
	int i;

	if (item->last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
					  "Not supported last point for range");

	for (i = 0; i < step_mngr.count; i++) {
		if (item->type == step_mngr.items[i])
			break;
	}

	if (i == step_mngr.count) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Invalid or missing item");
	}
	return 0;
}

static inline bool
is_tunnel_packet(enum rte_flow_item_type type)
{
	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
	    type == RTE_FLOW_ITEM_TYPE_GENEVE)
		return true;
	return false;
}

/*
 * Parse the flow director rule.
 * The supported PATTERN:
 *   case: non-tunnel packet:
 *     ETH : src-mac, dst-mac, ethertype
 *     VLAN: tag1, tag2
 *     IPv4: src-ip, dst-ip, tos, proto
 *     IPv6: src-ip(last 32 bits of the addr), dst-ip(last 32 bits of the
 *           addr), proto
 *     UDP : src-port, dst-port
 *     TCP : src-port, dst-port
 *     SCTP: src-port, dst-port, tag
 *   case: tunnel packet:
 *     OUTER-ETH: ethertype
 *     OUTER-L3 : proto
 *     OUTER-L4 : src-port, dst-port
 *     TUNNEL   : vni, flow-id(only valid for NVGRE)
 *     INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
 * The supported ACTION:
 *    QUEUE
 *    DROP
 *    COUNT
 *    MARK: the id range is [0, 4094]
 *    FLAG
 *    RSS: only valid if the firmware supports FD_QUEUE_REGION.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct hns3_fdir_rule *rule,
		       struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_item *item;
	struct items_step_mngr step_mngr;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir not supported in VF");

	step_mngr.items = first_items;
	step_mngr.count = RTE_DIM(first_items);
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		ret = hns3_validate_item(item, step_mngr, error);
		if (ret)
			return ret;

		if (is_tunnel_packet(item->type)) {
			ret = hns3_parse_tunnel(item, rule, error);
			if (ret)
				return ret;
			step_mngr.items = tunnel_next_items;
			step_mngr.count = RTE_DIM(tunnel_next_items);
		} else {
			ret = hns3_parse_normal(item, rule, &step_mngr, error);
			if (ret)
				return ret;
		}
	}

	return hns3_handle_actions(dev, actions, rule, error);
}
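
/*
 * Illustrative sketch (not part of the driver): a complete rule that stays
 * within the pattern/action support documented above -- drop and count UDP
 * packets with destination port 4789. "port_id" is an assumption.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_count count_conf = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 *	if (flow == NULL)
 *		printf("flow create failed: %s\n",
 *		       err.message ? err.message : "unknown");
 */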

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;

        fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        while (fdir_rule_ptr) {
                TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
        }

        rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        while (rss_filter_ptr) {
                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                rte_free(rss_filter_ptr);
                rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        }

        flow_node = TAILQ_FIRST(&hw->flow_list);
        while (flow_node) {
                TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
                rte_free(flow_node->flow);
                rte_free(flow_node);
                flow_node = TAILQ_FIRST(&hw->flow_list);
        }
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
                     const struct rte_flow_action_rss *with)
{
        bool rss_key_is_same;
        bool func_is_same;

        /*
         * When all RSS rules are flushed, the stored RSS func is set to the
         * invalid sentinel RTE_ETH_HASH_FUNCTION_MAX, so any rule created
         * after a flush compares as different and is reprogrammed. Otherwise,
         * a new rule whose func is RTE_ETH_HASH_FUNCTION_DEFAULT is treated
         * as matching whatever func is currently configured, keeping
         * consecutive RSS flows consistent.
         */
        if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
                func_is_same = false;
        else
                func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
                                (comp->func == with->func) : true;

        if (with->key_len == 0 || with->key == NULL)
                rss_key_is_same = true;
        else
                rss_key_is_same = comp->key_len == with->key_len &&
                                  !memcmp(comp->key, with->key, with->key_len);

        return (func_is_same && rss_key_is_same &&
                comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
                comp->level == with->level &&
                comp->queue_num == with->queue_num &&
                !memcmp(comp->queue, with->queue,
                        sizeof(*with->queue) * with->queue_num));
}
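
/*
 * Example of the sentinel behavior (illustrative): after a flush,
 * comp->func is RTE_ETH_HASH_FUNCTION_MAX and this helper reports
 * "different" for any candidate rule, so the next rule is always
 * reprogrammed; a candidate with func == RTE_ETH_HASH_FUNCTION_DEFAULT
 * is otherwise treated as matching the currently configured func.
 */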

static int
hns3_rss_conf_copy(struct hns3_rss_conf *out,
                   const struct rte_flow_action_rss *in)
{
        if (in->key_len > RTE_DIM(out->key) ||
            in->queue_num > RTE_DIM(out->queue))
                return -EINVAL;
        if (in->key == NULL && in->key_len)
                return -EINVAL;
        out->conf = (struct rte_flow_action_rss) {
                .func = in->func,
                .level = in->level,
                .types = in->types,
                .key_len = in->key_len,
                .queue_num = in->queue_num,
        };
        out->conf.queue = memcpy(out->queue, in->queue,
                                 sizeof(*in->queue) * in->queue_num);
        if (in->key)
                out->conf.key = memcpy(out->key, in->key, in->key_len);

        return 0;
}
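
/*
 * Note: on success 'out' owns private copies; out->conf.key and
 * out->conf.queue point into out->key/out->queue rather than at the
 * caller's buffers, so the input may be released afterwards. A minimal
 * usage sketch ('dst' and 'src' are hypothetical locals):
 *
 *     struct hns3_rss_conf dst;
 *     if (hns3_rss_conf_copy(&dst, &src->conf) != 0)
 *             return -EINVAL;
 */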

static bool
hns3_rss_input_tuple_supported(struct hns3_hw *hw,
                               const struct rte_flow_action_rss *rss)
{
        /*
         * Using the src/dst port fields as RSS hash input is not supported
         * for the following pure-IP packet types:
         * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
         * In addition, on Kunpeng920 the NIC hardware cannot hash IPv6 SCTP
         * packets on the src/dst port fields, whereas Kunpeng930 and later
         * Kunpeng series can.
         */
        if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
            (rss->types & RTE_ETH_RSS_IP ||
            (!hw->rss_info.ipv6_sctp_offload_supported &&
            rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
                return false;

        return true;
}
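
/*
 * For example (illustrative), a request such as
 * RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L4_SRC_ONLY is rejected here because
 * pure IP packet types carry no L4 ports to hash on, and
 * RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY is rejected
 * on hardware without ipv6_sctp_offload_supported.
 */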

/*
 * Validate the RSS action configuration.
 */
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
                      const struct rte_flow_action *actions,
                      struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_rss_conf *rss_conf = &hw->rss_info;
        const struct rte_flow_action_rss *rss;
        const struct rte_flow_action *act;
        uint32_t act_index = 0;
        uint16_t n;

        NEXT_ITEM_OF_ACTION(act, actions, act_index);
        rss = act->conf;

        if (rss == NULL) {
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          act, "no valid queues");
        }

        if (rss->queue_num > RTE_DIM(rss_conf->queue))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "configured queue number exceeds "
                                          "the queue buffer size the driver supports");

        for (n = 0; n < rss->queue_num; n++) {
                if (rss->queue[n] < hw->alloc_rss_size)
                        continue;
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "queue id must be less than the queue number allocated to a TC");
        }

        if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          act,
                                          "flow types are unsupported by "
                                          "hns3's RSS");
        if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "RSS hash function is not supported");
        if (rss->level)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "a nonzero RSS encapsulation level is not supported");
        if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
                                          "RSS hash key must be exactly 40 bytes");

        if (!hns3_rss_input_tuple_supported(hw, rss))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->types,
                                          "input RSS types are not supported");

        act_index++;

        /* Check that the next non-void action is END */
        NEXT_ITEM_OF_ACTION(act, actions, act_index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          act, "unsupported action");
        }

        return 0;
}
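
/*
 * A minimal action list that passes the checks above might look like this
 * sketch (illustrative only; 'queues' is an example local, queue ids must
 * stay below hw->alloc_rss_size, and key_len 0 selects the default
 * 40-byte key):
 *
 *     uint16_t queues[] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss rss = {
 *             .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *             .types = RTE_ETH_RSS_IP,
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */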

static int
hns3_disable_rss(struct hns3_hw *hw)
{
        int ret;

        /* Redirect all entries of the redirection table to queue 0 */
        ret = hns3_rss_reset_indir_table(hw);
        if (ret)
                return ret;

        /* Disable RSS */
        hw->rss_info.conf.types = 0;
        hw->rss_dis_flag = true;

        return 0;
}

static void
hns3_adjust_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
{
        if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
                hns3_warn(hw, "Default RSS hash key will be used");
                rss_conf->key = hns3_hash_key;
                rss_conf->key_len = HNS3_RSS_KEY_SIZE;
        }
}

static int
hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
                         uint8_t *hash_algo)
{
        enum rte_eth_hash_function algo_func = *func;

        switch (algo_func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
                /* Keep *hash_algo as what it used to be */
                algo_func = hw->rss_info.conf.func;
                break;
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
                break;
        case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
                *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
                break;
        case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
                *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
                break;
        default:
                hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
                         algo_func);
                return -EINVAL;
        }
        *func = algo_func;

        return 0;
}

static int
hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
{
        struct hns3_rss_tuple_cfg *tuple;
        int ret;

        hns3_adjust_rss_key(hw, rss_config);

        ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
                                       &hw->rss_info.hash_algo);
        if (ret)
                return ret;

        ret = hns3_rss_set_algo_key(hw, rss_config->key);
        if (ret)
                return ret;

        hw->rss_info.conf.func = rss_config->func;

        tuple = &hw->rss_info.rss_tuple_sets;
        ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
        if (ret)
                hns3_err(hw, "failed to update RSS tuples by rss hf: %d", ret);

        return ret;
}

static int
hns3_update_indir_table(struct rte_eth_dev *dev,
                        const struct rte_flow_action_rss *conf, uint16_t num)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
        uint16_t j;
        uint32_t i;

        /* Fill in redirection table */
        memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
               sizeof(hw->rss_info.rss_indirection_tbl));
        for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
                j %= num;
                if (conf->queue[j] >= hw->alloc_rss_size) {
                        hns3_err(hw, "queue id(%u) set to redirection table "
                                 "exceeds queue number(%u) allocated to a TC.",
                                 conf->queue[j], hw->alloc_rss_size);
                        return -EINVAL;
                }
                indir_tbl[i] = conf->queue[j];
        }

        return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
}
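
/*
 * Worked example (illustrative): with num == 3, conf->queue == {5, 6, 7}
 * and an indirection table of, say, 512 entries, the loop above fills
 * indir_tbl[] = 5, 6, 7, 5, 6, 7, ... so hash results are spread
 * round-robin over the three queues.
 */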

static int
hns3_config_rss_filter(struct rte_eth_dev *dev,
                       const struct hns3_rss_conf *conf, bool add)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_rss_conf *rss_info;
        uint64_t flow_types;
        uint16_t num;
        int ret;

        struct rte_flow_action_rss rss_flow_conf = {
                .func = conf->conf.func,
                .level = conf->conf.level,
                .types = conf->conf.types,
                .key_len = conf->conf.key_len,
                .queue_num = conf->conf.queue_num,
                .key = conf->conf.key_len ?
                    (void *)(uintptr_t)conf->conf.key : NULL,
                .queue = conf->conf.queue,
        };

        /* Filter out the unsupported flow types */
        flow_types = conf->conf.types ?
                     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
                     hw->rss_info.conf.types;
        if (flow_types != rss_flow_conf.types)
                hns3_warn(hw, "modified RSS types based on hardware support, "
                              "requested:0x%" PRIx64 " configured:0x%" PRIx64,
                          rss_flow_conf.types, flow_types);
        /* Update the useful flow types */
        rss_flow_conf.types = flow_types;

        rss_info = &hw->rss_info;
        if (!add) {
                if (!conf->valid)
                        return 0;

                ret = hns3_disable_rss(hw);
                if (ret) {
                        hns3_err(hw, "RSS disable failed(%d)", ret);
                        return ret;
                }

                if (rss_flow_conf.queue_num) {
                        /*
                         * Since the queue buffer contents have been reset
                         * to 0, rss_info->conf.queue must be set to NULL.
                         */
                        rss_info->conf.queue = NULL;
                        rss_info->conf.queue_num = 0;
                }

                /* Set the RSS func to the invalid sentinel after a flush */
                rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
                return 0;
        }

        /* Set Rx queues to use */
        num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
        if (rss_flow_conf.queue_num > num)
                hns3_warn(hw, "requested queue number %u exceeds the Rx queue count, truncated to %u",
                          rss_flow_conf.queue_num, num);
        hns3_info(hw, "%u contiguous PF queues are configured", num);

        rte_spinlock_lock(&hw->lock);
        if (num) {
                ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
                if (ret)
                        goto rss_config_err;
        }

        /* Set the hash algorithm and flow types from the user's config */
        ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
        if (ret)
                goto rss_config_err;

        ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
        if (ret) {
                hns3_err(hw, "RSS config init fail(%d)", ret);
                goto rss_config_err;
        }

        /*
         * When a new RSS rule is created, the old rules are overridden and
         * marked invalid.
         */
        TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
                rss_filter_ptr->filter_info.valid = false;

rss_config_err:
        rte_spinlock_unlock(&hw->lock);

        return ret;
}

static int
hns3_clear_rss_filter(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_hw *hw = &hns->hw;
        int rss_rule_succ_cnt = 0; /* count of successfully cleared RSS rules */
        int rss_rule_fail_cnt = 0; /* count of RSS rules that failed to clear */
        int ret = 0;

        rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        while (rss_filter_ptr) {
                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
                                             false);
                if (ret)
                        rss_rule_fail_cnt++;
                else
                        rss_rule_succ_cnt++;
                rte_free(rss_filter_ptr);
                rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
        }

        if (rss_rule_fail_cnt) {
                hns3_err(hw, "failed to delete all RSS filters, success num = %d "
                             "fail num = %d", rss_rule_succ_cnt,
                             rss_rule_fail_cnt);
                ret = -EIO;
        }

        return ret;
}

int
hns3_restore_rss_filter(struct rte_eth_dev *dev)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;

        /* If the user flushed all rules, there is no RSS rule to restore */
        if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
                return 0;

        return hns3_config_rss_filter(dev, &hw->rss_info, true);
}

static int
hns3_flow_parse_rss(struct rte_eth_dev *dev,
                    const struct hns3_rss_conf *conf, bool add)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        bool same;

        same = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
        if (same) {
                hns3_err(hw, "duplicate RSS configuration");
                return -EINVAL;
        }

        return hns3_config_rss_filter(dev, conf, add);
}

static int
hns3_flow_args_check(const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[],
                     struct rte_flow_error *error)
{
        if (pattern == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                          NULL, "NULL pattern.");

        if (actions == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                          NULL, "NULL action.");

        if (attr == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR,
                                          NULL, "NULL attribute.");

        return hns3_check_attr(attr, error);
}

/*
 * Check whether the flow rule is supported by hns3.
 * Only the rule format is checked; there is no guarantee that the rule can
 * be programmed into the hardware, e.g. there may not be enough room for it.
 */
static int
hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct hns3_fdir_rule fdir_rule;
        int ret;

        ret = hns3_flow_args_check(attr, pattern, actions, error);
        if (ret)
                return ret;

        if (hns3_find_rss_general_action(pattern, actions))
                return hns3_parse_rss_filter(dev, actions, error);

        memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
        return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
}
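
/*
 * Callers typically reach the validation above through the generic API,
 * e.g. this sketch ('port_id' is an example variable):
 *
 *     struct rte_flow_error err;
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions,
 *                                    &err);
 *
 * A zero return only means the rule is well-formed; creation can still
 * fail if the hardware table is full.
 */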

static int
hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
                          const struct rte_flow_action *act,
                          struct rte_flow *flow)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_rss_conf_ele *rss_filter_ptr;
        const struct hns3_rss_conf *rss_conf;
        int ret;

        rss_filter_ptr = rte_zmalloc("hns3 rss filter",
                                     sizeof(struct hns3_rss_conf_ele), 0);
        if (rss_filter_ptr == NULL) {
                hns3_err(hw, "failed to allocate hns3_rss_filter memory");
                return -ENOMEM;
        }

        /*
         * Program the rule to the hardware only after all the preceding
         * tasks have succeeded; this simplifies rolling back rules in the
         * hardware.
         */
        rss_conf = (const struct hns3_rss_conf *)act->conf;
        ret = hns3_flow_parse_rss(dev, rss_conf, true);
        if (ret != 0) {
                rte_free(rss_filter_ptr);
                return ret;
        }

        hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf);
        rss_filter_ptr->filter_info.valid = true;
        TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
        flow->rule = rss_filter_ptr;
        flow->filter_type = RTE_ETH_FILTER_HASH;

        return 0;
}

static int
hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
                           const struct rte_flow_item pattern[],
                           const struct rte_flow_action actions[],
                           struct rte_flow_error *error,
                           struct rte_flow *flow)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_fdir_rule fdir_rule;
        bool indir;
        int ret;

        memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
        ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
        if (ret != 0)
                return ret;

        indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR);
        if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
                ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id,
                                       error);
                if (ret != 0)
                        return ret;

                flow->counter_id = fdir_rule.act_cnt.id;
        }

        fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
                                    sizeof(struct hns3_fdir_rule_ele), 0);
        if (fdir_rule_ptr == NULL) {
                hns3_err(hw, "failed to allocate fdir_rule memory.");
                ret = -ENOMEM;
                goto err_malloc;
        }

        /*
         * Program the rule to the hardware only after all the preceding
         * tasks have succeeded; this simplifies rolling back rules in the
         * hardware.
         */
        ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
        if (ret != 0)
                goto err_fdir_filter;

        memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
               sizeof(struct hns3_fdir_rule));
        TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
        flow->rule = fdir_rule_ptr;
        flow->filter_type = RTE_ETH_FILTER_FDIR;

        return 0;

err_fdir_filter:
        rte_free(fdir_rule_ptr);
err_malloc:
        if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
                hns3_counter_release(dev, fdir_rule.act_cnt.id);

        return ret;
}

/*
 * Create a flow rule.
 * Theoretically one rule can match more than one filter; the first filter
 * it hits is used, so the matching sequence matters.
 */
static struct rte_flow *
hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_hw *hw = &hns->hw;
        struct hns3_flow_mem *flow_node;
        const struct rte_flow_action *act;
        struct rte_flow *flow;
        int ret;

        ret = hns3_flow_validate(dev, attr, pattern, actions, error);
        if (ret)
                return NULL;

        flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Failed to allocate flow memory");
                return NULL;
        }
        flow_node = rte_zmalloc("hns3 flow node",
                                sizeof(struct hns3_flow_mem), 0);
        if (flow_node == NULL) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Failed to allocate flow list memory");
                rte_free(flow);
                return NULL;
        }

        flow_node->flow = flow;
        TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);

        act = hns3_find_rss_general_action(pattern, actions);
        if (act)
                ret = hns3_flow_create_rss_rule(dev, act, flow);
        else
                ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
                                                 error, flow);
        if (ret == 0)
                return flow;

        rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow");
        TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
        rte_free(flow_node);
        rte_free(flow);

        return NULL;
}

/* Destroy a flow rule on hns3. */
static int
hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        struct hns3_fdir_rule_ele *fdir_rule_ptr;
        struct hns3_rss_conf_ele *rss_filter_ptr;
        struct hns3_flow_mem *flow_node;
        enum rte_filter_type filter_type;
        struct hns3_fdir_rule fdir_rule;
        struct hns3_hw *hw = &hns->hw;
        int ret;

        if (flow == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "Flow is NULL");

        filter_type = flow->filter_type;
        switch (filter_type) {
        case RTE_ETH_FILTER_FDIR:
                fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
                memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
                       sizeof(struct hns3_fdir_rule));

                ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
                if (ret)
                        return rte_flow_error_set(error, EIO,
                                                  RTE_FLOW_ERROR_TYPE_HANDLE,
                                                  flow,
                                                  "Failed to destroy FDIR rule, try again");
                if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
                        hns3_counter_release(dev, fdir_rule.act_cnt.id);
                TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
                rte_free(fdir_rule_ptr);
                fdir_rule_ptr = NULL;
                break;
        case RTE_ETH_FILTER_HASH:
                rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
                ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
                                             false);
                if (ret)
                        return rte_flow_error_set(error, EIO,
                                                  RTE_FLOW_ERROR_TYPE_HANDLE,
                                                  flow,
                                                  "Failed to destroy RSS rule, try again");
                TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
                rte_free(rss_filter_ptr);
                rss_filter_ptr = NULL;
                break;
        default:
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, flow,
                                          "Unsupported filter type");
        }

        TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
                if (flow_node->flow == flow) {
                        TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
                        rte_free(flow_node);
                        flow_node = NULL;
                        break;
                }
        }
        rte_free(flow);

        return 0;
}

/* Destroy all flow rules associated with a port on hns3. */
static int
hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct hns3_adapter *hns = dev->data->dev_private;
        int ret;

        /* FDIR is available only in PF driver */
        if (!hns->is_vf) {
                ret = hns3_clear_all_fdir_filter(hns);
                if (ret) {
                        rte_flow_error_set(error, ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE,
                                           NULL, "Failed to flush rule");
                        return ret;
                }
                hns3_counter_flush(dev);
        }

        ret = hns3_clear_rss_filter(dev);
        if (ret) {
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Failed to flush rss filter");
                return ret;
        }

        hns3_filterlist_flush(dev);

        return 0;
}

/* Query an existing flow rule. */
static int
hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                const struct rte_flow_action *actions, void *data,
                struct rte_flow_error *error)
{
        struct rte_flow_action_rss *rss_conf;
        struct hns3_rss_conf_ele *rss_rule;
        struct rte_flow_query_count *qc;
        int ret;

        if (!flow->rule)
                return rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        qc = (struct rte_flow_query_count *)data;
                        ret = hns3_counter_query(dev, flow, qc, error);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        if (flow->filter_type != RTE_ETH_FILTER_HASH) {
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions, "action is not supported");
                        }
                        rss_conf = (struct rte_flow_action_rss *)data;
                        rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
                        rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
                                   sizeof(struct rte_flow_action_rss));
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                actions, "action is not supported");
                }
        }

        return 0;
}
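
/*
 * Example (illustrative) of reading a COUNT action back through the
 * generic API; 'port_id' is an example variable and 'flow' must have been
 * created with a counter action:
 *
 *     struct rte_flow_query_count cnt = { .reset = 0 };
 *     struct rte_flow_action count_act[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     if (rte_flow_query(port_id, flow, count_act, &cnt, &err) == 0)
 *             printf("hits: %" PRIu64 "\n", cnt.hits);
 */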

static int
hns3_flow_validate_wrap(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        pthread_mutex_lock(&hw->flows_lock);
        ret = hns3_flow_validate(dev, attr, pattern, actions, error);
        pthread_mutex_unlock(&hw->flows_lock);

        return ret;
}

static struct rte_flow *
hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_flow *flow;

        pthread_mutex_lock(&hw->flows_lock);
        flow = hns3_flow_create(dev, attr, pattern, actions, error);
        pthread_mutex_unlock(&hw->flows_lock);

        return flow;
}

static int
hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        pthread_mutex_lock(&hw->flows_lock);
        ret = hns3_flow_destroy(dev, flow, error);
        pthread_mutex_unlock(&hw->flows_lock);

        return ret;
}

static int
hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        pthread_mutex_lock(&hw->flows_lock);
        ret = hns3_flow_flush(dev, error);
        pthread_mutex_unlock(&hw->flows_lock);

        return ret;
}

static int
hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
                     const struct rte_flow_action *actions, void *data,
                     struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        pthread_mutex_lock(&hw->flows_lock);
        ret = hns3_flow_query(dev, flow, actions, data, error);
        pthread_mutex_unlock(&hw->flows_lock);

        return ret;
}

static int
hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
                        const struct rte_flow_action *action,
                        struct rte_flow_error *error)
{
        if (!conf->ingress)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Indir action ingress can't be zero");

        if (conf->egress)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Indir action does not support egress");

        if (conf->transfer)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Indir action does not support transfer");

        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "Indir action only supports count");

        return 0;
}
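
/*
 * Example (illustrative): the only indirect action this driver accepts is
 * an ingress COUNT, e.g. ('port_id' is an example variable):
 *
 *     struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_action_count cnt = { .id = 0 };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt,
 *     };
 *     struct rte_flow_error err;
 *     handle = rte_flow_action_handle_create(port_id, &conf, &act, &err);
 */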

static struct rte_flow_action_handle *
hns3_flow_action_create(struct rte_eth_dev *dev,
                        const struct rte_flow_indir_action_conf *conf,
                        const struct rte_flow_action *action,
                        struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action_count *act_count;
        struct rte_flow_action_handle *handle = NULL;
        struct hns3_flow_counter *counter;

        if (hns3_check_indir_action(conf, action, error))
                return NULL;

        handle = rte_zmalloc("hns3 action handle",
                             sizeof(struct rte_flow_action_handle), 0);
        if (handle == NULL) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Failed to allocate action memory");
                return NULL;
        }

        pthread_mutex_lock(&hw->flows_lock);

        act_count = (const struct rte_flow_action_count *)action->conf;
        if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                   action, "Invalid counter id");
                goto err_exit;
        }

        if (hns3_counter_new(dev, false, act_count->id, error))
                goto err_exit;

        counter = hns3_counter_lookup(dev, act_count->id);
        if (counter == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                   action, "Counter id not found");
                goto err_exit;
        }

        counter->indirect = true;
        handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
        handle->counter_id = counter->id;

        pthread_mutex_unlock(&hw->flows_lock);
        return handle;

err_exit:
        pthread_mutex_unlock(&hw->flows_lock);
        rte_free(handle);
        return NULL;
}

static int
hns3_flow_action_destroy(struct rte_eth_dev *dev,
                         struct rte_flow_action_handle *handle,
                         struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct hns3_flow_counter *counter;

        pthread_mutex_lock(&hw->flows_lock);

        if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
                pthread_mutex_unlock(&hw->flows_lock);
                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                        handle, "Invalid indirect type");
        }

        counter = hns3_counter_lookup(dev, handle->counter_id);
        if (counter == NULL) {
                pthread_mutex_unlock(&hw->flows_lock);
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                handle, "Counter id does not exist");
        }

        if (counter->ref_cnt > 1) {
                pthread_mutex_unlock(&hw->flows_lock);
                return rte_flow_error_set(error, EBUSY,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                handle, "Counter id in use");
        }

        (void)hns3_counter_release(dev, handle->counter_id);
        rte_free(handle);

        pthread_mutex_unlock(&hw->flows_lock);
        return 0;
}

static int
hns3_flow_action_query(struct rte_eth_dev *dev,
                       const struct rte_flow_action_handle *handle,
                       void *data,
                       struct rte_flow_error *error)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_flow flow;
        int ret;

        pthread_mutex_lock(&hw->flows_lock);

        if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
                pthread_mutex_unlock(&hw->flows_lock);
                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                        handle, "Invalid indirect type");
        }

        memset(&flow, 0, sizeof(flow));
        flow.counter_id = handle->counter_id;
        ret = hns3_counter_query(dev, &flow,
                                 (struct rte_flow_query_count *)data, error);
        pthread_mutex_unlock(&hw->flows_lock);
        return ret;
}

static const struct rte_flow_ops hns3_flow_ops = {
        .validate = hns3_flow_validate_wrap,
        .create = hns3_flow_create_wrap,
        .destroy = hns3_flow_destroy_wrap,
        .flush = hns3_flow_flush_wrap,
        .query = hns3_flow_query_wrap,
        .isolate = NULL,
        .action_handle_create = hns3_flow_action_create,
        .action_handle_destroy = hns3_flow_action_destroy,
        .action_handle_query = hns3_flow_action_query,
};
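
/*
 * These callbacks are not called directly: the ethdev layer resolves them
 * via hns3_dev_flow_ops_get() below, and applications invoke them through
 * the rte_flow_*() wrappers, e.g. rte_flow_flush(port_id, &err) ends up
 * in hns3_flow_flush_wrap().
 */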

int
hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
                      const struct rte_flow_ops **ops)
{
        struct hns3_hw *hw;

        hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (hw->adapter_state >= HNS3_NIC_CLOSED)
                return -ENODEV;

        *ops = &hns3_flow_ops;
        return 0;
}

void
hns3_flow_init(struct rte_eth_dev *dev)
{
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pthread_mutexattr_t attr;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(&hw->flows_lock, &attr);
        dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;

        TAILQ_INIT(&hw->flow_fdir_list);
        TAILQ_INIT(&hw->flow_rss_list);
        TAILQ_INIT(&hw->flow_list);
}

void
hns3_flow_uninit(struct rte_eth_dev *dev)
{
        struct rte_flow_error error;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                hns3_flow_flush_wrap(dev, &error);
}