net/hns3: fix flow counter value
[dpdk.git] / drivers / net / hns3 / hns3_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4
5 #include <rte_flow_driver.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11
12 /* Default RSS hash key */
13 static uint8_t hns3_hash_key[] = {
14         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
15         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
16         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
17         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
18         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
19 };
20
21 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
22 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
23
24 /* Special Filter id for non-specific packet flagging. Don't change value */
25 #define HNS3_MAX_FILTER_ID      0x0FFF
26
27 #define ETHER_TYPE_MASK         0xFFFF
28 #define IPPROTO_MASK            0xFF
29 #define TUNNEL_TYPE_MASK        0xFFFF
30
31 #define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
32 #define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
33 #define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
34 #define HNS3_TUNNEL_TYPE_NVGRE          0x6558
35
36 static enum rte_flow_item_type first_items[] = {
37         RTE_FLOW_ITEM_TYPE_ETH,
38         RTE_FLOW_ITEM_TYPE_IPV4,
39         RTE_FLOW_ITEM_TYPE_IPV6,
40         RTE_FLOW_ITEM_TYPE_TCP,
41         RTE_FLOW_ITEM_TYPE_UDP,
42         RTE_FLOW_ITEM_TYPE_SCTP,
43         RTE_FLOW_ITEM_TYPE_ICMP,
44         RTE_FLOW_ITEM_TYPE_NVGRE,
45         RTE_FLOW_ITEM_TYPE_VXLAN,
46         RTE_FLOW_ITEM_TYPE_GENEVE,
47         RTE_FLOW_ITEM_TYPE_VXLAN_GPE
48 };
49
50 static enum rte_flow_item_type L2_next_items[] = {
51         RTE_FLOW_ITEM_TYPE_VLAN,
52         RTE_FLOW_ITEM_TYPE_IPV4,
53         RTE_FLOW_ITEM_TYPE_IPV6
54 };
55
56 static enum rte_flow_item_type L3_next_items[] = {
57         RTE_FLOW_ITEM_TYPE_TCP,
58         RTE_FLOW_ITEM_TYPE_UDP,
59         RTE_FLOW_ITEM_TYPE_SCTP,
60         RTE_FLOW_ITEM_TYPE_NVGRE,
61         RTE_FLOW_ITEM_TYPE_ICMP
62 };
63
64 static enum rte_flow_item_type L4_next_items[] = {
65         RTE_FLOW_ITEM_TYPE_VXLAN,
66         RTE_FLOW_ITEM_TYPE_GENEVE,
67         RTE_FLOW_ITEM_TYPE_VXLAN_GPE
68 };
69
70 static enum rte_flow_item_type tunnel_next_items[] = {
71         RTE_FLOW_ITEM_TYPE_ETH,
72         RTE_FLOW_ITEM_TYPE_VLAN
73 };
74
75 struct items_step_mngr {
76         enum rte_flow_item_type *items;
77         int count;
78 };
79
80 static inline void
81 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
82 {
83         size_t i;
84
85         for (i = 0; i < len; i++)
86                 dst[i] = rte_be_to_cpu_32(src[i]);
87 }
88
89 /*
90  * This function is used to find the general RSS action.
91  * 1. RSS is used to spread packets among several queues. The flow API
92  *    provides struct rte_flow_action_rss, and users can configure its fields
93  *    such as func/level/types/key/queue to control the RSS function.
94  * 2. The flow API also supports queue region configuration for hns3. It is
95  *    implemented with FDIR + RSS in hns3 hardware: users can create one FDIR
96  *    rule whose action is an RSS queue region.
97  * 3. When the action is RSS, the following rule distinguishes the two cases:
98  *    Case 1: the pattern has ETH and the action's queue_num > 0, indicating
99  *            a queue region configuration.
100  *    Otherwise: a general RSS action.
101  */
102 static const struct rte_flow_action *
103 hns3_find_rss_general_action(const struct rte_flow_item pattern[],
104                              const struct rte_flow_action actions[])
105 {
106         const struct rte_flow_action *act = NULL;
107         const struct hns3_rss_conf *rss;
108         bool have_eth = false;
109
110         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
111                 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
112                         act = actions;
113                         break;
114                 }
115         }
116         if (!act)
117                 return NULL;
118
119         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
120                 if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
121                         have_eth = true;
122                         break;
123                 }
124         }
125
126         rss = act->conf;
127         if (have_eth && rss->conf.queue_num) {
128                 /*
129                  * The pattern has ETH and the action's queue_num > 0, which
130                  * indicates a queue region configuration.
131                  * Because queue region is implemented with FDIR + RSS in hns3
132                  * hardware, it must go through the FDIR process, so return
133                  * NULL here to avoid entering the RSS process.
134                  */
135                 return NULL;
136         }
137
138         return act;
139 }
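
/*
 * Illustrative sketch (not part of the driver): the two action layouts
 * distinguished above, as an application would fill them. The queue and
 * type values are hypothetical.
 */
static __rte_unused void
hns3_example_rss_action_layouts(void)
{
	static const uint16_t region_queues[] = { 0, 1, 2, 3 };
	/* Queue region: together with an ETH pattern item, queue_num > 0. */
	const struct rte_flow_action_rss queue_region = {
		.queue_num = RTE_DIM(region_queues),
		.queue = region_queues,
	};
	/* General RSS: hash configuration only, no explicit queue list. */
	const struct rte_flow_action_rss general_rss = {
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key = hns3_hash_key,
		.key_len = sizeof(hns3_hash_key),
		.queue_num = 0,
	};

	RTE_SET_USED(queue_region);
	RTE_SET_USED(general_rss);
}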
140
141 static inline struct hns3_flow_counter *
142 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
143 {
144         struct hns3_adapter *hns = dev->data->dev_private;
145         struct hns3_pf *pf = &hns->pf;
146         struct hns3_flow_counter *cnt;
147
148         LIST_FOREACH(cnt, &pf->flow_counters, next) {
149                 if (cnt->id == id)
150                         return cnt;
151         }
152         return NULL;
153 }
154
155 static int
156 hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
157                  struct rte_flow_error *error)
158 {
159         struct hns3_adapter *hns = dev->data->dev_private;
160         struct hns3_pf *pf = &hns->pf;
161         struct hns3_hw *hw = &hns->hw;
162         struct hns3_flow_counter *cnt;
163         uint64_t value;
164         int ret;
165
166         cnt = hns3_counter_lookup(dev, id);
167         if (cnt) {
168                 if (!cnt->shared || cnt->shared != shared)
169                         return rte_flow_error_set(error, ENOTSUP,
170                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
171                                 cnt,
172                                 "Counter id is in use, shared flag does not match");
173                 cnt->ref_cnt++;
174                 return 0;
175         }
176
177         /* Clear the counter by read ops because the counter is read-clear */
178         ret = hns3_get_count(hw, id, &value);
179         if (ret)
180                 return rte_flow_error_set(error, EIO,
181                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
182                                           "Clear counter failed!");
183
184         cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
185         if (cnt == NULL)
186                 return rte_flow_error_set(error, ENOMEM,
187                                           RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
188                                           "Alloc mem for counter failed");
189         cnt->id = id;
190         cnt->shared = shared;
191         cnt->ref_cnt = 1;
192         cnt->hits = 0;
193         LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
194         return 0;
195 }
196
197 static int
198 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
199                    struct rte_flow_query_count *qc,
200                    struct rte_flow_error *error)
201 {
202         struct hns3_adapter *hns = dev->data->dev_private;
203         struct hns3_flow_counter *cnt;
204         uint64_t value;
205         int ret;
206
207         /* FDIR is available only in PF driver */
208         if (hns->is_vf)
209                 return rte_flow_error_set(error, ENOTSUP,
210                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
211                                           "Fdir is not supported in VF");
212         cnt = hns3_counter_lookup(dev, flow->counter_id);
213         if (cnt == NULL)
214                 return rte_flow_error_set(error, EINVAL,
215                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
216                                           "Can't find counter id");
217
218         ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
219         if (ret) {
220                 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
221                                    NULL, "Read counter failed.");
222                 return ret;
223         }
224         qc->hits_set = 1;
225         qc->hits = value;
226
227         return 0;
228 }
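
/*
 * Illustrative sketch of how an application reads the counter queried
 * above, assuming "flow" was created with a COUNT action. Because the
 * hardware counter is read-clear, each query returns the hits
 * accumulated since the previous read.
 */
static __rte_unused int
hns3_example_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count qc = { .reset = 0 };
	struct rte_flow_error error;

	return rte_flow_query(dev->data->port_id, flow, &count_action,
			      &qc, &error);
}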
229
230 static int
231 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
232 {
233         struct hns3_adapter *hns = dev->data->dev_private;
234         struct hns3_hw *hw = &hns->hw;
235         struct hns3_flow_counter *cnt;
236
237         cnt = hns3_counter_lookup(dev, id);
238         if (cnt == NULL) {
239                 hns3_err(hw, "Can't find available counter to release");
240                 return -EINVAL;
241         }
242         cnt->ref_cnt--;
243         if (cnt->ref_cnt == 0) {
244                 LIST_REMOVE(cnt, next);
245                 rte_free(cnt);
246         }
247         return 0;
248 }
249
250 static void
251 hns3_counter_flush(struct rte_eth_dev *dev)
252 {
253         struct hns3_adapter *hns = dev->data->dev_private;
254         struct hns3_pf *pf = &hns->pf;
255         struct hns3_flow_counter *cnt_ptr;
256
257         cnt_ptr = LIST_FIRST(&pf->flow_counters);
258         while (cnt_ptr) {
259                 LIST_REMOVE(cnt_ptr, next);
260                 rte_free(cnt_ptr);
261                 cnt_ptr = LIST_FIRST(&pf->flow_counters);
262         }
263 }
264
265 static int
266 hns3_handle_action_queue(struct rte_eth_dev *dev,
267                          const struct rte_flow_action *action,
268                          struct hns3_fdir_rule *rule,
269                          struct rte_flow_error *error)
270 {
271         struct hns3_adapter *hns = dev->data->dev_private;
272         const struct rte_flow_action_queue *queue;
273         struct hns3_hw *hw = &hns->hw;
274
275         queue = (const struct rte_flow_action_queue *)action->conf;
276         if (queue->index >= hw->used_rx_queues) {
277                 hns3_err(hw, "queue ID(%u) is greater than the number of "
278                           "available queues (%u) in the driver.",
279                           queue->index, hw->used_rx_queues);
280                 return rte_flow_error_set(error, EINVAL,
281                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
282                                           action, "Invalid queue ID in PF");
283         }
284
285         rule->queue_id = queue->index;
286         rule->nb_queues = 1;
287         rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
288         return 0;
289 }
290
291 static int
292 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
293                                 const struct rte_flow_action *action,
294                                 struct hns3_fdir_rule *rule,
295                                 struct rte_flow_error *error)
296 {
297         struct hns3_adapter *hns = dev->data->dev_private;
298         const struct rte_flow_action_rss *conf = action->conf;
299         struct hns3_hw *hw = &hns->hw;
300         uint16_t idx;
301
302         if (!hns3_dev_fd_queue_region_supported(hw))
303                 return rte_flow_error_set(error, ENOTSUP,
304                         RTE_FLOW_ERROR_TYPE_ACTION, action,
305                         "Queue region configuration is not supported!");
306
307         if ((!rte_is_power_of_2(conf->queue_num)) ||
308                 conf->queue_num > hw->rss_size_max ||
309                 conf->queue[0] >= hw->used_rx_queues ||
310                 conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
311                 return rte_flow_error_set(error, EINVAL,
312                         RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
313                         "Invalid start queue ID or queue num! The start queue "
314                         "ID must be valid, and the queue num must be a power "
315                         "of 2 and <= rss_size_max.");
316         }
317
318         for (idx = 1; idx < conf->queue_num; idx++) {
319                 if (conf->queue[idx] != conf->queue[idx - 1] + 1)
320                         return rte_flow_error_set(error, EINVAL,
321                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
322                                 "Invalid queue ID sequence! The queue IDs "
323                                 "must increase consecutively.");
324         }
325
326         rule->queue_id = conf->queue[0];
327         rule->nb_queues = conf->queue_num;
328         rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
329         return 0;
330 }
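
/*
 * For example (illustrative): with used_rx_queues = 16 and
 * rss_size_max = 8, the checks above accept the queue list
 * { 4, 5, 6, 7 } (start 4, num 4), but reject { 4, 6, 8, 10 }
 * (not consecutive) and { 3, 4, 5 } (num is not a power of 2).
 */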
331
332 /*
333  * Parse the actions structure from the provided actions.
334  * The actions are validated as they are parsed and copied into the rule.
335  *
336  * @param actions[in]
337  * @param rule[out]
338  *   NIC specific actions derived from the actions.
339  * @param error[out]
340  */
341 static int
342 hns3_handle_actions(struct rte_eth_dev *dev,
343                     const struct rte_flow_action actions[],
344                     struct hns3_fdir_rule *rule, struct rte_flow_error *error)
345 {
346         struct hns3_adapter *hns = dev->data->dev_private;
347         const struct rte_flow_action_count *act_count;
348         const struct rte_flow_action_mark *mark;
349         struct hns3_pf *pf = &hns->pf;
350         uint32_t counter_num;
351         int ret;
352
353         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
354                 switch (actions->type) {
355                 case RTE_FLOW_ACTION_TYPE_QUEUE:
356                         ret = hns3_handle_action_queue(dev, actions, rule,
357                                                        error);
358                         if (ret)
359                                 return ret;
360                         break;
361                 case RTE_FLOW_ACTION_TYPE_DROP:
362                         rule->action = HNS3_FD_ACTION_DROP_PACKET;
363                         break;
364                 /*
365                  * Here the RSS action's real purpose is queue region.
366                  * Queue region is implemented with FDIR + RSS in hns3
367                  * hardware: the FDIR action selects a queue region
368                  * (start_queue_id and queue_num), then RSS spreads packets
369                  * over that region using the RSS algorithm.
370                  */
371                 case RTE_FLOW_ACTION_TYPE_RSS:
372                         ret = hns3_handle_action_queue_region(dev, actions,
373                                                               rule, error);
374                         if (ret)
375                                 return ret;
376                         break;
377                 case RTE_FLOW_ACTION_TYPE_MARK:
378                         mark =
379                             (const struct rte_flow_action_mark *)actions->conf;
380                         if (mark->id >= HNS3_MAX_FILTER_ID)
381                                 return rte_flow_error_set(error, EINVAL,
382                                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
383                                                 actions,
384                                                 "Invalid Mark ID");
385                         rule->fd_id = mark->id;
386                         rule->flags |= HNS3_RULE_FLAG_FDID;
387                         break;
388                 case RTE_FLOW_ACTION_TYPE_FLAG:
389                         rule->fd_id = HNS3_MAX_FILTER_ID;
390                         rule->flags |= HNS3_RULE_FLAG_FDID;
391                         break;
392                 case RTE_FLOW_ACTION_TYPE_COUNT:
393                         act_count =
394                             (const struct rte_flow_action_count *)actions->conf;
395                         counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
396                         if (act_count->id >= counter_num)
397                                 return rte_flow_error_set(error, EINVAL,
398                                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
399                                                 actions,
400                                                 "Invalid counter id");
401                         rule->act_cnt = *act_count;
402                         rule->flags |= HNS3_RULE_FLAG_COUNTER;
403                         break;
404                 case RTE_FLOW_ACTION_TYPE_VOID:
405                         break;
406                 default:
407                         return rte_flow_error_set(error, ENOTSUP,
408                                                   RTE_FLOW_ERROR_TYPE_ACTION,
409                                                   NULL, "Unsupported action");
410                 }
411         }
412
413         return 0;
414 }
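
/*
 * Illustrative sketch of an actions array this parser accepts: steer
 * matching packets to queue 3, tag them with mark ID 0x123 and count
 * them with counter 0. All values are hypothetical.
 */
static __rte_unused const struct rte_flow_action hns3_example_fdir_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &(const struct rte_flow_action_queue){ .index = 3 },
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_MARK,
		.conf = &(const struct rte_flow_action_mark){ .id = 0x123 },
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &(const struct rte_flow_action_count){ .id = 0 },
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};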
415
416 static int
417 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
418 {
419         if (!attr->ingress)
420                 return rte_flow_error_set(error, EINVAL,
421                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
422                                           attr, "Ingress can't be zero");
423         if (attr->egress)
424                 return rte_flow_error_set(error, ENOTSUP,
425                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
426                                           attr, "Egress is not supported");
427         if (attr->transfer)
428                 return rte_flow_error_set(error, ENOTSUP,
429                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
430                                           attr, "Transfer is not supported");
431         if (attr->priority)
432                 return rte_flow_error_set(error, ENOTSUP,
433                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
434                                           attr, "Priority is not supported");
435         if (attr->group)
436                 return rte_flow_error_set(error, ENOTSUP,
437                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
438                                           attr, "Group is not supported");
439         return 0;
440 }
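
/*
 * Illustrative sketch: the only attribute layout accepted by the check
 * above is a plain ingress rule.
 */
static __rte_unused const struct rte_flow_attr hns3_example_attr = {
	.ingress = 1,
};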
441
442 static int
443 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
444                struct rte_flow_error *error __rte_unused)
445 {
446         const struct rte_flow_item_eth *eth_spec;
447         const struct rte_flow_item_eth *eth_mask;
448
449         /* Only used to describe the protocol stack. */
450         if (item->spec == NULL && item->mask == NULL)
451                 return 0;
452
453         if (item->mask) {
454                 eth_mask = item->mask;
455                 if (eth_mask->type) {
456                         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
457                         rule->key_conf.mask.ether_type =
458                             rte_be_to_cpu_16(eth_mask->type);
459                 }
460                 if (!rte_is_zero_ether_addr(&eth_mask->src)) {
461                         hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
462                         memcpy(rule->key_conf.mask.src_mac,
463                                eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
464                 }
465                 if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
466                         hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
467                         memcpy(rule->key_conf.mask.dst_mac,
468                                eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
469                 }
470         }
471
472         eth_spec = item->spec;
473         rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
474         memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
475                RTE_ETHER_ADDR_LEN);
476         memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
477                RTE_ETHER_ADDR_LEN);
478         return 0;
479 }
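
/*
 * Illustrative sketch of an ETH item spec/mask pair this parser accepts:
 * match the destination MAC exactly and leave the source MAC and
 * EtherType unmasked. The address is hypothetical.
 */
static __rte_unused const struct rte_flow_item_eth hns3_example_eth_spec = {
	.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
};
static __rte_unused const struct rte_flow_item_eth hns3_example_eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
};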
480
481 static int
482 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
483                 struct rte_flow_error *error)
484 {
485         const struct rte_flow_item_vlan *vlan_spec;
486         const struct rte_flow_item_vlan *vlan_mask;
487
488         rule->key_conf.vlan_num++;
489         if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
490                 return rte_flow_error_set(error, EINVAL,
491                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
492                                           "Vlan_num is more than 2");
493
494         /* Only used to describe the protocol stack. */
495         if (item->spec == NULL && item->mask == NULL)
496                 return 0;
497
498         if (item->mask) {
499                 vlan_mask = item->mask;
500                 if (vlan_mask->tci) {
501                         if (rule->key_conf.vlan_num == 1) {
502                                 hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
503                                              1);
504                                 rule->key_conf.mask.vlan_tag1 =
505                                     rte_be_to_cpu_16(vlan_mask->tci);
506                         } else {
507                                 hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
508                                              1);
509                                 rule->key_conf.mask.vlan_tag2 =
510                                     rte_be_to_cpu_16(vlan_mask->tci);
511                         }
512                 }
513         }
514
515         vlan_spec = item->spec;
516         if (rule->key_conf.vlan_num == 1)
517                 rule->key_conf.spec.vlan_tag1 =
518                     rte_be_to_cpu_16(vlan_spec->tci);
519         else
520                 rule->key_conf.spec.vlan_tag2 =
521                     rte_be_to_cpu_16(vlan_spec->tci);
522         return 0;
523 }
524
525 static bool
526 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
527 {
528         if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
529             ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
530             ipv4_mask->hdr.hdr_checksum)
531                 return false;
532
533         return true;
534 }
535
536 static int
537 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
538                 struct rte_flow_error *error)
539 {
540         const struct rte_flow_item_ipv4 *ipv4_spec;
541         const struct rte_flow_item_ipv4 *ipv4_mask;
542
543         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
544         rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
545         rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
546
547         /* Only used to describe the protocol stack. */
548         if (item->spec == NULL && item->mask == NULL)
549                 return 0;
550
551         if (item->mask) {
552                 ipv4_mask = item->mask;
553                 if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
554                         return rte_flow_error_set(error, EINVAL,
555                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
556                                                   item,
557                                                   "Only support src & dst ip, tos, proto in IPV4");
558                 }
559
560                 if (ipv4_mask->hdr.src_addr) {
561                         hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
562                         rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
563                             rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
564                 }
565
566                 if (ipv4_mask->hdr.dst_addr) {
567                         hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
568                         rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
569                             rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
570                 }
571
572                 if (ipv4_mask->hdr.type_of_service) {
573                         hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
574                         rule->key_conf.mask.ip_tos =
575                             ipv4_mask->hdr.type_of_service;
576                 }
577
578                 if (ipv4_mask->hdr.next_proto_id) {
579                         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
580                         rule->key_conf.mask.ip_proto =
581                             ipv4_mask->hdr.next_proto_id;
582                 }
583         }
584
585         ipv4_spec = item->spec;
586         rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
587             rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
588         rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
589             rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
590         rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
591         rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
592         return 0;
593 }
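
/*
 * Illustrative sketch of an IPV4 item spec/mask pair this parser
 * accepts: match the source and destination addresses exactly; the
 * header fields rejected by hns3_check_ipv4_mask_supported() stay zero.
 * The addresses come from the pattern example further below.
 */
static __rte_unused const struct rte_flow_item_ipv4 hns3_example_ipv4_spec = {
	.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
	.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
};
static __rte_unused const struct rte_flow_item_ipv4 hns3_example_ipv4_mask = {
	.hdr.src_addr = RTE_BE32(UINT32_MAX),
	.hdr.dst_addr = RTE_BE32(UINT32_MAX),
};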
594
595 static int
596 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
597                 struct rte_flow_error *error)
598 {
599         const struct rte_flow_item_ipv6 *ipv6_spec;
600         const struct rte_flow_item_ipv6 *ipv6_mask;
601
602         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
603         rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
604         rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
605
606         /* Only used to describe the protocol stack. */
607         if (item->spec == NULL && item->mask == NULL)
608                 return 0;
609
610         if (item->mask) {
611                 ipv6_mask = item->mask;
612                 if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
613                     ipv6_mask->hdr.hop_limits) {
614                         return rte_flow_error_set(error, EINVAL,
615                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
616                                                   item,
617                                                   "Only support src & dst ip, proto in IPV6");
618                 }
619                 net_addr_to_host(rule->key_conf.mask.src_ip,
620                                  (const rte_be32_t *)ipv6_mask->hdr.src_addr,
621                                  IP_ADDR_LEN);
622                 net_addr_to_host(rule->key_conf.mask.dst_ip,
623                                  (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
624                                  IP_ADDR_LEN);
625                 rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
626                 if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
627                         hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
628                 if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
629                         hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
630                 if (ipv6_mask->hdr.proto)
631                         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
632         }
633
634         ipv6_spec = item->spec;
635         net_addr_to_host(rule->key_conf.spec.src_ip,
636                          (const rte_be32_t *)ipv6_spec->hdr.src_addr,
637                          IP_ADDR_LEN);
638         net_addr_to_host(rule->key_conf.spec.dst_ip,
639                          (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
640                          IP_ADDR_LEN);
641         rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
642
643         return 0;
644 }
645
646 static bool
647 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
648 {
649         if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
650             tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
651             tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
652             tcp_mask->hdr.tcp_urp)
653                 return false;
654
655         return true;
656 }
657
658 static int
659 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
660                struct rte_flow_error *error)
661 {
662         const struct rte_flow_item_tcp *tcp_spec;
663         const struct rte_flow_item_tcp *tcp_mask;
664
665         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
666         rule->key_conf.spec.ip_proto = IPPROTO_TCP;
667         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
668
669         /* Only used to describe the protocol stack. */
670         if (item->spec == NULL && item->mask == NULL)
671                 return 0;
672
673         if (item->mask) {
674                 tcp_mask = item->mask;
675                 if (!hns3_check_tcp_mask_supported(tcp_mask)) {
676                         return rte_flow_error_set(error, EINVAL,
677                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
678                                                   item,
679                                                   "Only support src & dst port in TCP");
680                 }
681
682                 if (tcp_mask->hdr.src_port) {
683                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
684                         rule->key_conf.mask.src_port =
685                             rte_be_to_cpu_16(tcp_mask->hdr.src_port);
686                 }
687                 if (tcp_mask->hdr.dst_port) {
688                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
689                         rule->key_conf.mask.dst_port =
690                             rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
691                 }
692         }
693
694         tcp_spec = item->spec;
695         rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
696         rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
697
698         return 0;
699 }
700
701 static int
702 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
703                struct rte_flow_error *error)
704 {
705         const struct rte_flow_item_udp *udp_spec;
706         const struct rte_flow_item_udp *udp_mask;
707
708         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
709         rule->key_conf.spec.ip_proto = IPPROTO_UDP;
710         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
711
712         /* Only used to describe the protocol stack. */
713         if (item->spec == NULL && item->mask == NULL)
714                 return 0;
715
716         if (item->mask) {
717                 udp_mask = item->mask;
718                 if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
719                         return rte_flow_error_set(error, EINVAL,
720                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
721                                                   item,
722                                                   "Only support src & dst port in UDP");
723                 }
724                 if (udp_mask->hdr.src_port) {
725                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
726                         rule->key_conf.mask.src_port =
727                             rte_be_to_cpu_16(udp_mask->hdr.src_port);
728                 }
729                 if (udp_mask->hdr.dst_port) {
730                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
731                         rule->key_conf.mask.dst_port =
732                             rte_be_to_cpu_16(udp_mask->hdr.dst_port);
733                 }
734         }
735
736         udp_spec = item->spec;
737         rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
738         rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
739
740         return 0;
741 }
742
743 static int
744 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
745                 struct rte_flow_error *error)
746 {
747         const struct rte_flow_item_sctp *sctp_spec;
748         const struct rte_flow_item_sctp *sctp_mask;
749
750         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
751         rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
752         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
753
754         /* Only used to describe the protocol stack. */
755         if (item->spec == NULL && item->mask == NULL)
756                 return 0;
757
758         if (item->mask) {
759                 sctp_mask = item->mask;
760                 if (sctp_mask->hdr.cksum)
761                         return rte_flow_error_set(error, EINVAL,
762                                                   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
763                                                   item,
764                                                   "Only support src & dst port in SCTP");
765                 if (sctp_mask->hdr.src_port) {
766                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
767                         rule->key_conf.mask.src_port =
768                             rte_be_to_cpu_16(sctp_mask->hdr.src_port);
769                 }
770                 if (sctp_mask->hdr.dst_port) {
771                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
772                         rule->key_conf.mask.dst_port =
773                             rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
774                 }
775                 if (sctp_mask->hdr.tag) {
776                         hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
777                         rule->key_conf.mask.sctp_tag =
778                             rte_be_to_cpu_32(sctp_mask->hdr.tag);
779                 }
780         }
781
782         sctp_spec = item->spec;
783         rule->key_conf.spec.src_port =
784             rte_be_to_cpu_16(sctp_spec->hdr.src_port);
785         rule->key_conf.spec.dst_port =
786             rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
787         rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
788
789         return 0;
790 }
791
792 /*
793  * Check items before the tunnel item, save inner configs to outer configs,
794  * and clear the inner configs.
795  * The key consists of two parts: meta_data and tuple keys.
796  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and
797  * tunnel packet(1bit).
798  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
799  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
800  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
801  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
802  * vlantag2(16bit) and sctp-tag(32bit).
803  */
804 static int
805 hns3_handle_tunnel(const struct rte_flow_item *item,
806                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
807 {
808         /* check eth config */
809         if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
810                 return rte_flow_error_set(error, EINVAL,
811                                           RTE_FLOW_ERROR_TYPE_ITEM,
812                                           item, "Outer eth mac is unsupported");
813         if (rule->input_set & BIT(INNER_ETH_TYPE)) {
814                 hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
815                 rule->key_conf.spec.outer_ether_type =
816                     rule->key_conf.spec.ether_type;
817                 rule->key_conf.mask.outer_ether_type =
818                     rule->key_conf.mask.ether_type;
819                 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
820                 rule->key_conf.spec.ether_type = 0;
821                 rule->key_conf.mask.ether_type = 0;
822         }
823
824         /* check vlan config */
825         if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
826                 return rte_flow_error_set(error, EINVAL,
827                                           RTE_FLOW_ERROR_TYPE_ITEM,
828                                           item,
829                                           "Outer vlan tags are unsupported");
830
831         /* clear vlan_num for inner vlan select */
832         rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
833         rule->key_conf.vlan_num = 0;
834
835         /* check L3 config */
836         if (rule->input_set &
837             (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
838                 return rte_flow_error_set(error, EINVAL,
839                                           RTE_FLOW_ERROR_TYPE_ITEM,
840                                           item, "Outer ip is unsupported");
841         if (rule->input_set & BIT(INNER_IP_PROTO)) {
842                 hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
843                 rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
844                 rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
845                 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
846                 rule->key_conf.spec.ip_proto = 0;
847                 rule->key_conf.mask.ip_proto = 0;
848         }
849
850         /* check L4 config */
851         if (rule->input_set & BIT(INNER_SCTP_TAG))
852                 return rte_flow_error_set(error, EINVAL,
853                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
854                                           "Outer sctp tag is unsupported");
855
856         if (rule->input_set & BIT(INNER_SRC_PORT)) {
857                 hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
858                 rule->key_conf.spec.outer_src_port =
859                     rule->key_conf.spec.src_port;
860                 rule->key_conf.mask.outer_src_port =
861                     rule->key_conf.mask.src_port;
862                 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
863                 rule->key_conf.spec.src_port = 0;
864                 rule->key_conf.mask.src_port = 0;
865         }
866         if (rule->input_set & BIT(INNER_DST_PORT)) {
867                 hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
868                 rule->key_conf.spec.dst_port = 0;
869                 rule->key_conf.mask.dst_port = 0;
870         }
871         return 0;
872 }
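
/*
 * For example (illustrative): in the pattern "eth / ipv4 / udp / vxlan /
 * eth / ipv4 / tcp", the items before VXLAN are first parsed into the
 * inner configs; when the VXLAN item is reached, the function above
 * moves the matched ether type, IP proto and source port to the outer
 * key fields and clears the inner ones (the destination port match is
 * cleared rather than moved), so that the items after VXLAN describe
 * the inner packet.
 */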
873
874 static int
875 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
876                  struct rte_flow_error *error)
877 {
878         const struct rte_flow_item_vxlan *vxlan_spec;
879         const struct rte_flow_item_vxlan *vxlan_mask;
880
881         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
882         rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
883         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
884                 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
885         else
886                 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
887
888         /* Only used to describe the protocol stack. */
889         if (item->spec == NULL && item->mask == NULL)
890                 return 0;
891
892         vxlan_mask = item->mask;
893         vxlan_spec = item->spec;
894
895         if (vxlan_mask->flags)
896                 return rte_flow_error_set(error, EINVAL,
897                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
898                                           "Flags are not supported in VxLAN");
899
900         /* VNI must be totally masked or not. */
901         if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
902             memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
903                 return rte_flow_error_set(error, EINVAL,
904                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
905                                           "VNI must be totally masked or not in VxLAN");
906         if (vxlan_mask->vni[0]) {
907                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
908                 memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
909                            VNI_OR_TNI_LEN);
910         }
911         memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
912                    VNI_OR_TNI_LEN);
913         return 0;
914 }
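
/*
 * Illustrative sketch of a VXLAN item spec/mask pair this parser
 * accepts: the VNI must be either fully masked, as here, or not masked
 * at all. The VNI value is hypothetical.
 */
static __rte_unused const struct rte_flow_item_vxlan hns3_example_vxlan_spec = {
	.vni = { 0x12, 0x34, 0x56 },
};
static __rte_unused const struct rte_flow_item_vxlan hns3_example_vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },
};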
915
916 static int
917 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
918                  struct rte_flow_error *error)
919 {
920         const struct rte_flow_item_nvgre *nvgre_spec;
921         const struct rte_flow_item_nvgre *nvgre_mask;
922
923         hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
924         rule->key_conf.spec.outer_proto = IPPROTO_GRE;
925         rule->key_conf.mask.outer_proto = IPPROTO_MASK;
926
927         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
928         rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
929         rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
930         /* Only used to describe the protocol stack. */
931         if (item->spec == NULL && item->mask == NULL)
932                 return 0;
933
934         nvgre_mask = item->mask;
935         nvgre_spec = item->spec;
936
937         if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
938                 return rte_flow_error_set(error, EINVAL,
939                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
940                                           "Ver/protocol is not supported in NVGRE");
941
942         /* TNI must be totally masked or not. */
943         if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
944             memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
945                 return rte_flow_error_set(error, EINVAL,
946                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
947                                           "TNI must be totally masked or not in NVGRE");
948
949         if (nvgre_mask->tni[0]) {
950                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
951                 memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
952                            VNI_OR_TNI_LEN);
953         }
954         memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
955                    VNI_OR_TNI_LEN);
956
957         if (nvgre_mask->flow_id) {
958                 hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
959                 rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
960         }
961         rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
962         return 0;
963 }
964
965 static int
966 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
967                   struct rte_flow_error *error)
968 {
969         const struct rte_flow_item_geneve *geneve_spec;
970         const struct rte_flow_item_geneve *geneve_mask;
971
972         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
973         rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
974         rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
975         /* Only used to describe the protocol stack. */
976         if (item->spec == NULL && item->mask == NULL)
977                 return 0;
978
979         geneve_mask = item->mask;
980         geneve_spec = item->spec;
981
982         if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
983                 return rte_flow_error_set(error, EINVAL,
984                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
985                                           "Ver/protocol is not supported in GENEVE");
986         /* VNI must be totally masked or not. */
987         if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
988             memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
989                 return rte_flow_error_set(error, EINVAL,
990                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
991                                           "VNI must be totally masked or not in GENEVE");
992         if (geneve_mask->vni[0]) {
993                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
994                 memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
995                            VNI_OR_TNI_LEN);
996         }
997         memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
998                    VNI_OR_TNI_LEN);
999         return 0;
1000 }
1001
1002 static int
1003 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1004                   struct rte_flow_error *error)
1005 {
1006         int ret;
1007
1008         if (item->spec == NULL && item->mask)
1009                 return rte_flow_error_set(error, EINVAL,
1010                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1011                                           "Can't configure FDIR with mask "
1012                                           "but without spec");
1013         else if (item->spec && (item->mask == NULL))
1014                 return rte_flow_error_set(error, EINVAL,
1015                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1016                                           "Tunnel packets must be "
1017                                           "configured with mask");
1018
1019         switch (item->type) {
1020         case RTE_FLOW_ITEM_TYPE_VXLAN:
1021         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1022                 ret = hns3_parse_vxlan(item, rule, error);
1023                 break;
1024         case RTE_FLOW_ITEM_TYPE_NVGRE:
1025                 ret = hns3_parse_nvgre(item, rule, error);
1026                 break;
1027         case RTE_FLOW_ITEM_TYPE_GENEVE:
1028                 ret = hns3_parse_geneve(item, rule, error);
1029                 break;
1030         default:
1031                 return rte_flow_error_set(error, ENOTSUP,
1032                                           RTE_FLOW_ERROR_TYPE_ITEM,
1033                                           NULL, "Unsupported tunnel type!");
1034         }
1035         if (ret)
1036                 return ret;
1037         return hns3_handle_tunnel(item, rule, error);
1038 }
1039
1040 static int
1041 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1042                   struct items_step_mngr *step_mngr,
1043                   struct rte_flow_error *error)
1044 {
1045         int ret;
1046
1047         if (item->spec == NULL && item->mask)
1048                 return rte_flow_error_set(error, EINVAL,
1049                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1050                                           "Can't configure FDIR with mask "
1051                                           "but without spec");
1052
1053         switch (item->type) {
1054         case RTE_FLOW_ITEM_TYPE_ETH:
1055                 ret = hns3_parse_eth(item, rule, error);
1056                 step_mngr->items = L2_next_items;
1057                 step_mngr->count = ARRAY_SIZE(L2_next_items);
1058                 break;
1059         case RTE_FLOW_ITEM_TYPE_VLAN:
1060                 ret = hns3_parse_vlan(item, rule, error);
1061                 step_mngr->items = L2_next_items;
1062                 step_mngr->count = ARRAY_SIZE(L2_next_items);
1063                 break;
1064         case RTE_FLOW_ITEM_TYPE_IPV4:
1065                 ret = hns3_parse_ipv4(item, rule, error);
1066                 step_mngr->items = L3_next_items;
1067                 step_mngr->count = ARRAY_SIZE(L3_next_items);
1068                 break;
1069         case RTE_FLOW_ITEM_TYPE_IPV6:
1070                 ret = hns3_parse_ipv6(item, rule, error);
1071                 step_mngr->items = L3_next_items;
1072                 step_mngr->count = ARRAY_SIZE(L3_next_items);
1073                 break;
1074         case RTE_FLOW_ITEM_TYPE_TCP:
1075                 ret = hns3_parse_tcp(item, rule, error);
1076                 step_mngr->items = L4_next_items;
1077                 step_mngr->count = ARRAY_SIZE(L4_next_items);
1078                 break;
1079         case RTE_FLOW_ITEM_TYPE_UDP:
1080                 ret = hns3_parse_udp(item, rule, error);
1081                 step_mngr->items = L4_next_items;
1082                 step_mngr->count = ARRAY_SIZE(L4_next_items);
1083                 break;
1084         case RTE_FLOW_ITEM_TYPE_SCTP:
1085                 ret = hns3_parse_sctp(item, rule, error);
1086                 step_mngr->items = L4_next_items;
1087                 step_mngr->count = ARRAY_SIZE(L4_next_items);
1088                 break;
1089         default:
1090                 return rte_flow_error_set(error, ENOTSUP,
1091                                           RTE_FLOW_ERROR_TYPE_ITEM,
1092                                           NULL, "Unsupported normal type!");
1093         }
1094
1095         return ret;
1096 }
1097
1098 static int
1099 hns3_validate_item(const struct rte_flow_item *item,
1100                    struct items_step_mngr step_mngr,
1101                    struct rte_flow_error *error)
1102 {
1103         int i;
1104
1105         if (item->last)
1106                 return rte_flow_error_set(error, ENOTSUP,
1107                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1108                                           "Not supported last point for range");
1109
1110         for (i = 0; i < step_mngr.count; i++) {
1111                 if (item->type == step_mngr.items[i])
1112                         break;
1113         }
1114
1115         if (i == step_mngr.count) {
1116                 return rte_flow_error_set(error, EINVAL,
1117                                           RTE_FLOW_ERROR_TYPE_ITEM,
1118                                           item, "Invalid or missing item");
1119         }
1120         return 0;
1121 }
1122
1123 static inline bool
1124 is_tunnel_packet(enum rte_flow_item_type type)
1125 {
1126         if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1127             type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1128             type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1129             type == RTE_FLOW_ITEM_TYPE_GENEVE)
1130                 return true;
1131         return false;
1132 }
1133
1134 /*
1135  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1136  * and collect the flow director filter info along the way.
1137  * UDP/TCP/SCTP PATTERN:
1138  * The first not void item can be ETH or IPV4 or IPV6
1139  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1140  * The next not void item could be UDP or TCP or SCTP (optional)
1141  * The next not void item could be RAW (for flexbyte, optional)
1142  * The next not void item must be END.
1143  * A Fuzzy Match pattern can appear at any place before END.
1144  * Fuzzy Match is optional for IPV4 but is required for IPV6
1145  * MAC VLAN PATTERN:
1146  * The first not void item must be ETH.
1147  * The second not void item must be MAC VLAN.
1148  * The next not void item must be END.
1149  * ACTION:
1150  * The first not void action should be QUEUE or DROP.
1151  * The second not void optional action should be MARK,
1152  * mark_id is a uint32_t number.
1153  * The next not void action should be END.
1154  * UDP/TCP/SCTP pattern example:
1155  * ITEM         Spec                    Mask
1156  * ETH          NULL                    NULL
1157  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1158  *              dst_addr 192.167.3.50   0xFFFFFFFF
1159  * UDP/TCP/SCTP src_port        80      0xFFFF
1160  *              dst_port        80      0xFFFF
1161  * END
1162  * MAC VLAN pattern example:
1163  * ITEM         Spec                    Mask
1164  * ETH          dst_addr
1165                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1166                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1167  * MAC VLAN     tci     0x2016          0xEFFF
1168  * END
1169  * Other members in mask and spec should be set to 0x00.
1170  * Item->last should be NULL.
1171  */
1172 static int
1173 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1174                        const struct rte_flow_item pattern[],
1175                        const struct rte_flow_action actions[],
1176                        struct hns3_fdir_rule *rule,
1177                        struct rte_flow_error *error)
1178 {
1179         struct hns3_adapter *hns = dev->data->dev_private;
1180         const struct rte_flow_item *item;
1181         struct items_step_mngr step_mngr;
1182         int ret;
1183
1184         /* FDIR is available only in PF driver */
1185         if (hns->is_vf)
1186                 return rte_flow_error_set(error, ENOTSUP,
1187                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1188                                           "Fdir is not supported in VF");
1189
1190         step_mngr.items = first_items;
1191         step_mngr.count = ARRAY_SIZE(first_items);
1192         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1193                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1194                         continue;
1195
1196                 ret = hns3_validate_item(item, step_mngr, error);
1197                 if (ret)
1198                         return ret;
1199
1200                 if (is_tunnel_packet(item->type)) {
1201                         ret = hns3_parse_tunnel(item, rule, error);
1202                         if (ret)
1203                                 return ret;
1204                         step_mngr.items = tunnel_next_items;
1205                         step_mngr.count = ARRAY_SIZE(tunnel_next_items);
1206                 } else {
1207                         ret = hns3_parse_normal(item, rule, &step_mngr, error);
1208                         if (ret)
1209                                 return ret;
1210                 }
1211         }
1212
1213         return hns3_handle_actions(dev, actions, rule, error);
1214 }
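
/*
 * Illustrative sketch of a pattern this parser accepts, matching the
 * UDP/TCP/SCTP example in the comment above (IPv4 src/dst addresses
 * plus TCP ports); it reuses the hypothetical IPV4 spec/mask defined
 * earlier in this file.
 */
static __rte_unused const struct rte_flow_item hns3_example_fdir_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &hns3_example_ipv4_spec,
		.mask = &hns3_example_ipv4_mask,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = &(const struct rte_flow_item_tcp){
			.hdr.src_port = RTE_BE16(80),
			.hdr.dst_port = RTE_BE16(80),
		},
		.mask = &(const struct rte_flow_item_tcp){
			.hdr.src_port = RTE_BE16(0xFFFF),
			.hdr.dst_port = RTE_BE16(0xFFFF),
		},
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};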
1215
1216 void
1217 hns3_filterlist_init(struct rte_eth_dev *dev)
1218 {
1219         struct hns3_process_private *process_list = dev->process_private;
1220
1221         TAILQ_INIT(&process_list->fdir_list);
1222         TAILQ_INIT(&process_list->filter_rss_list);
1223         TAILQ_INIT(&process_list->flow_list);
1224 }
1225
1226 static void
1227 hns3_filterlist_flush(struct rte_eth_dev *dev)
1228 {
1229         struct hns3_process_private *process_list = dev->process_private;
1230         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1231         struct hns3_rss_conf_ele *rss_filter_ptr;
1232         struct hns3_flow_mem *flow_node;
1233
1234         fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1235         while (fdir_rule_ptr) {
1236                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1237                 rte_free(fdir_rule_ptr);
1238                 fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1239         }
1240
1241         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1242         while (rss_filter_ptr) {
1243                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1244                              entries);
1245                 rte_free(rss_filter_ptr);
1246                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1247         }
1248
1249         flow_node = TAILQ_FIRST(&process_list->flow_list);
1250         while (flow_node) {
1251                 TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1252                 rte_free(flow_node->flow);
1253                 rte_free(flow_node);
1254                 flow_node = TAILQ_FIRST(&process_list->flow_list);
1255         }
1256 }
1257
1258 static bool
1259 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1260                      const struct rte_flow_action_rss *with)
1261 {
1262         bool func_is_same;
1263
1264         /*
1265          * When the user flushes all RSS rules, the RSS func is set to the
1266          * invalid value RTE_ETH_HASH_FUNCTION_MAX. If the user then creates
1267          * a flow, any valid RSS func differs from the flushed one.
1268          * Otherwise, when the user creates an RSS action with the func
1269          * specified as RTE_ETH_HASH_FUNCTION_DEFAULT, the func is treated
1270          * as the same between consecutive RSS flows.
1271          */
1272         if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1273                 func_is_same = false;
1274         else
1275                 func_is_same = with->func ? (comp->func == with->func) : true;
1276
1277         return (func_is_same &&
1278                 comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1279                 comp->level == with->level && comp->key_len == with->key_len &&
1280                 comp->queue_num == with->queue_num &&
1281                 !memcmp(comp->key, with->key, with->key_len) &&
1282                 !memcmp(comp->queue, with->queue,
1283                         sizeof(*with->queue) * with->queue_num));
1284 }
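
/*
 * Illustration only (not part of this file): under the rules above, a
 * stored config with comp->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ
 * compares equal to a new action whose func was left at
 * RTE_ETH_HASH_FUNCTION_DEFAULT, because a default func acts as a
 * wildcard. After a flush sets comp->func to RTE_ETH_HASH_FUNCTION_MAX,
 * however, no new action can compare equal.
 */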
1285
1286 static int
1287 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1288                    const struct rte_flow_action_rss *in)
1289 {
1290         if (in->key_len > RTE_DIM(out->key) ||
1291             in->queue_num > RTE_DIM(out->queue))
1292                 return -EINVAL;
1293         if (in->key == NULL && in->key_len)
1294                 return -EINVAL;
1295         out->conf = (struct rte_flow_action_rss) {
1296                 .func = in->func,
1297                 .level = in->level,
1298                 .types = in->types,
1299                 .key_len = in->key_len,
1300                 .queue_num = in->queue_num,
1301         };
1302         out->conf.queue = memcpy(out->queue, in->queue,
1303                                 sizeof(*in->queue) * in->queue_num);
1304         if (in->key)
1305                 out->conf.key = memcpy(out->key, in->key, in->key_len);
1306
1307         return 0;
1308 }
1309
1310 static bool
1311 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1312                                const struct rte_flow_action_rss *rss)
1313 {
1314         /*
1315          * For IP packets, the src/dst port fields cannot be used in the RSS
1316          * hash for the following packet types:
1317          * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1318          * Besides, on Kunpeng920 the NIC hardware cannot use the src/dst
1319          * port fields in the RSS hash for the IPV6 SCTP packet type, while
1320          * Kunpeng930 and later Kunpeng series do support hashing on the
1321          * src/dst port fields for the IPv6 SCTP packet type.
1322          */
1323         if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
1324             (rss->types & ETH_RSS_IP ||
1325             (!hw->rss_info.ipv6_sctp_offload_supported &&
1326             rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
1327                 return false;
1328
1329         return true;
1330 }
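
/*
 * Illustration only: a request such as
 *     rss->types = ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY;
 * is rejected by the check above because fragmented IP packets carry no
 * usable L4 port fields. Likewise, ETH_RSS_NONFRAG_IPV6_SCTP combined
 * with an L4_SRC/DST_ONLY modifier is rejected on hardware (such as
 * Kunpeng920) that cannot hash on IPv6 SCTP port fields.
 */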
1331
1332 /*
1333  * This function is used to validate the RSS action.
1334  */
1335 static int
1336 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1337                       const struct rte_flow_action *actions,
1338                       struct rte_flow_error *error)
1339 {
1340         struct hns3_adapter *hns = dev->data->dev_private;
1341         struct hns3_hw *hw = &hns->hw;
1342         struct hns3_rss_conf *rss_conf = &hw->rss_info;
1343         const struct rte_flow_action_rss *rss;
1344         const struct rte_flow_action *act;
1345         uint32_t act_index = 0;
1346         uint16_t n;
1347
1348         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1349         rss = act->conf;
1350
1351         if (rss == NULL) {
1352                 return rte_flow_error_set(error, EINVAL,
1353                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1354                                           act, "RSS action configuration is NULL");
1355         }
1356
1357         if (rss->queue_num > RTE_DIM(rss_conf->queue))
1358                 return rte_flow_error_set(error, ENOTSUP,
1359                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1360                                           "queue number configured exceeds the "
1361                                           "queue buffer size the driver supports");
1362
1363         for (n = 0; n < rss->queue_num; n++) {
1364                 if (rss->queue[n] < hw->alloc_rss_size)
1365                         continue;
1366                 return rte_flow_error_set(error, EINVAL,
1367                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1368                                           "queue id must be less than queue number allocated to a TC");
1369         }
1370
1371         if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1372                 return rte_flow_error_set(error, EINVAL,
1373                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1374                                           act,
1375                                           "flow types are unsupported by "
1376                                           "hns3's RSS");
1377         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1378                 return rte_flow_error_set(error, ENOTSUP,
1379                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1380                                           "RSS hash function is not supported");
1381         if (rss->level)
1382                 return rte_flow_error_set(error, ENOTSUP,
1383                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1384                                           "a nonzero RSS encapsulation level is not supported");
1385         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1386                 return rte_flow_error_set(error, ENOTSUP,
1387                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1388                                           "RSS hash key must be exactly 40 bytes");
1389
1390         if (!hns3_rss_input_tuple_supported(hw, rss))
1391                 return rte_flow_error_set(error, EINVAL,
1392                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1393                                           &rss->types,
1394                                           "input RSS types are not supported");
1395
1396         act_index++;
1397
1398         /* Check if the next non-void action is END */
1399         NEXT_ITEM_OF_ACTION(act, actions, act_index);
1400         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1401                 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ACTION,
1404                                           act, "Not supported action.");
1405         }
1406
1407         return 0;
1408 }
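
/*
 * Example only: a minimal sketch of an RSS action that passes the
 * validation above. The queue list and types are assumptions, and the
 * hypothetical HNS3_FLOW_DOC_EXAMPLE guard keeps the sketch compiled
 * out; it reuses the 40-byte hns3_hash_key defined in this file.
 */
#ifdef HNS3_FLOW_DOC_EXAMPLE
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss_act = {
	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
	.level = 0,				/* nonzero levels are rejected */
	.types = ETH_RSS_IP,			/* must overlap HNS3_ETH_RSS_SUPPORT */
	.key_len = sizeof(hns3_hash_key),	/* must be exactly 40 bytes */
	.key = hns3_hash_key,
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,		/* each id below hw->alloc_rss_size */
};
#endif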
1409
1410 static int
1411 hns3_disable_rss(struct hns3_hw *hw)
1412 {
1413         int ret;
1414
1415         /* Redirect all entries of the redirection table to queue 0 */
1416         ret = hns3_rss_reset_indir_table(hw);
1417         if (ret)
1418                 return ret;
1419
1420         /* Disable RSS */
1421         hw->rss_info.conf.types = 0;
1422         hw->rss_dis_flag = true;
1423
1424         return 0;
1425 }
1426
1427 static void
1428 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1429 {
1430         if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1431                 hns3_warn(hw, "Default RSS hash key will be set");
1432                 rss_conf->key = hns3_hash_key;
1433                 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1434         }
1435 }
1436
1437 static int
1438 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1439                          uint8_t *hash_algo)
1440 {
1441         enum rte_eth_hash_function algo_func = *func;
1442         switch (algo_func) {
1443         case RTE_ETH_HASH_FUNCTION_DEFAULT:
1444                 /* Keep *hash_algo as what it used to be */
1445                 algo_func = hw->rss_info.conf.func;
1446                 break;
1447         case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1448                 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1449                 break;
1450         case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1451                 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1452                 break;
1453         case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1454                 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1455                 break;
1456         default:
1457                 hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1458                          algo_func);
1459                 return -EINVAL;
1460         }
1461         *func = algo_func;
1462
1463         return 0;
1464 }
1465
1466 static int
1467 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1468 {
1469         struct hns3_rss_tuple_cfg *tuple;
1470         int ret;
1471
1472         hns3_parse_rss_key(hw, rss_config);
1473
1474         ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1475                                        &hw->rss_info.hash_algo);
1476         if (ret)
1477                 return ret;
1478
1479         ret = hns3_rss_set_algo_key(hw, rss_config->key);
1480         if (ret)
1481                 return ret;
1482
1483         hw->rss_info.conf.func = rss_config->func;
1484
1485         tuple = &hw->rss_info.rss_tuple_sets;
1486         ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1487         if (ret)
1488                 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1489
1490         return ret;
1491 }
1492
1493 static int
1494 hns3_update_indir_table(struct rte_eth_dev *dev,
1495                         const struct rte_flow_action_rss *conf, uint16_t num)
1496 {
1497         struct hns3_adapter *hns = dev->data->dev_private;
1498         struct hns3_hw *hw = &hns->hw;
1499         uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1500         uint16_t j;
1501         uint32_t i;
1502
1503         /* Fill in redirection table */
1504         memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1505                sizeof(hw->rss_info.rss_indirection_tbl));
1506         for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1507                 j %= num;
1508                 if (conf->queue[j] >= hw->alloc_rss_size) {
1509                         hns3_err(hw, "queue id(%u) set to redirection table "
1510                                  "exceeds queue number(%u) allocated to a TC.",
1511                                  conf->queue[j], hw->alloc_rss_size);
1512                         return -EINVAL;
1513                 }
1514                 indir_tbl[i] = conf->queue[j];
1515         }
1516
1517         return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
1518 }
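
/*
 * Worked illustration (assumed sizes): with hw->rss_ind_tbl_size == 8,
 * num == 2 and conf->queue == { 3, 5 }, the loop above fills the
 * redirection table round-robin as { 3, 5, 3, 5, 3, 5, 3, 5 }.
 */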
1519
1520 static int
1521 hns3_config_rss_filter(struct rte_eth_dev *dev,
1522                        const struct hns3_rss_conf *conf, bool add)
1523 {
1524         struct hns3_process_private *process_list = dev->process_private;
1525         struct hns3_adapter *hns = dev->data->dev_private;
1526         struct hns3_rss_conf_ele *rss_filter_ptr;
1527         struct hns3_hw *hw = &hns->hw;
1528         struct hns3_rss_conf *rss_info;
1529         uint64_t flow_types;
1530         uint16_t num;
1531         int ret;
1532
1533         struct rte_flow_action_rss rss_flow_conf = {
1534                 .func = conf->conf.func,
1535                 .level = conf->conf.level,
1536                 .types = conf->conf.types,
1537                 .key_len = conf->conf.key_len,
1538                 .queue_num = conf->conf.queue_num,
1539                 .key = conf->conf.key_len ?
1540                     (void *)(uintptr_t)conf->conf.key : NULL,
1541                 .queue = conf->conf.queue,
1542         };
1543
1544         /* Filter the unsupported flow types */
1545         flow_types = conf->conf.types ?
1546                      rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1547                      hw->rss_info.conf.types;
1548         if (flow_types != rss_flow_conf.types)
1549                 hns3_warn(hw, "modified RSS types based on hardware support, "
1550                               "requested:%" PRIx64 " configured:%" PRIx64,
1551                           rss_flow_conf.types, flow_types);
1552         /* Update the useful flow types */
1553         rss_flow_conf.types = flow_types;
1554
1555         rss_info = &hw->rss_info;
1556         if (!add) {
1557                 if (!conf->valid)
1558                         return 0;
1559
1560                 ret = hns3_disable_rss(hw);
1561                 if (ret) {
1562                         hns3_err(hw, "RSS disable failed(%d)", ret);
1563                         return ret;
1564                 }
1565
1566                 if (rss_flow_conf.queue_num) {
1567                         /*
1568                          * Since the content of the queue pointer has been
1569                          * reset to 0, rss_info->conf.queue must be set to NULL.
1570                          */
1571                         rss_info->conf.queue = NULL;
1572                         rss_info->conf.queue_num = 0;
1573                 }
1574
1575                 /* mark the RSS func as invalid after the flush */
1576                 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1577                 return 0;
1578         }
1579
1580         /* Set rx queues to use */
1581         num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1582         if (rss_flow_conf.queue_num > num)
1583                 hns3_warn(hw, "configured queue number %u exceeds the limit and is truncated",
1584                           rss_flow_conf.queue_num);
1585         hns3_info(hw, "a maximum of %u contiguous PF queues are configured", num);
1586
1587         rte_spinlock_lock(&hw->lock);
1588         if (num) {
1589                 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1590                 if (ret)
1591                         goto rss_config_err;
1592         }
1593
1594         /* Set hash algorithm and flow types by the user's config */
1595         ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1596         if (ret)
1597                 goto rss_config_err;
1598
1599         ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1600         if (ret) {
1601                 hns3_err(hw, "RSS config init fail(%d)", ret);
1602                 goto rss_config_err;
1603         }
1604
1605         /*
1606          * When a new RSS rule is created, the old rule is overridden and
1607          * marked invalid.
1608          */
1609         TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
1610                 rss_filter_ptr->filter_info.valid = false;
1611
1612 rss_config_err:
1613         rte_spinlock_unlock(&hw->lock);
1614
1615         return ret;
1616 }
1617
1618 static int
1619 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1620 {
1621         struct hns3_process_private *process_list = dev->process_private;
1622         struct hns3_adapter *hns = dev->data->dev_private;
1623         struct hns3_rss_conf_ele *rss_filter_ptr;
1624         struct hns3_hw *hw = &hns->hw;
1625         int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1626         int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1627         int ret = 0;
1628
1629         rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1630         while (rss_filter_ptr) {
1631                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1632                              entries);
1633                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1634                                              false);
1635                 if (ret)
1636                         rss_rule_fail_cnt++;
1637                 else
1638                         rss_rule_succ_cnt++;
1639                 rte_free(rss_filter_ptr);
1640                 rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
1641         }
1642
1643         if (rss_rule_fail_cnt) {
1644                 hns3_err(hw, "fail to delete all RSS filters, success num = %d "
1645                              "fail num = %d", rss_rule_succ_cnt,
1646                              rss_rule_fail_cnt);
1647                 ret = -EIO;
1648         }
1649
1650         return ret;
1651 }
1652
1653 int
1654 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1655 {
1656         struct hns3_adapter *hns = dev->data->dev_private;
1657         struct hns3_hw *hw = &hns->hw;
1658
1659         /* When the user flushes all rules, the RSS rule need not be restored */
1660         if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1661                 return 0;
1662
1663         return hns3_config_rss_filter(dev, &hw->rss_info, true);
1664 }
1665
1666 static int
1667 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1668                     const struct hns3_rss_conf *conf, bool add)
1669 {
1670         struct hns3_adapter *hns = dev->data->dev_private;
1671         struct hns3_hw *hw = &hns->hw;
1672         bool ret;
1673
1674         ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1675         if (ret) {
1676                 hns3_err(hw, "duplicate RSS configuration");
1677                 return -EINVAL;
1678         }
1679
1680         return hns3_config_rss_filter(dev, conf, add);
1681 }
1682
1683 static int
1684 hns3_flow_args_check(const struct rte_flow_attr *attr,
1685                      const struct rte_flow_item pattern[],
1686                      const struct rte_flow_action actions[],
1687                      struct rte_flow_error *error)
1688 {
1689         if (pattern == NULL)
1690                 return rte_flow_error_set(error, EINVAL,
1691                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1692                                           NULL, "NULL pattern.");
1693
1694         if (actions == NULL)
1695                 return rte_flow_error_set(error, EINVAL,
1696                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1697                                           NULL, "NULL action.");
1698
1699         if (attr == NULL)
1700                 return rte_flow_error_set(error, EINVAL,
1701                                           RTE_FLOW_ERROR_TYPE_ATTR,
1702                                           NULL, "NULL attribute.");
1703
1704         return hns3_check_attr(attr, error);
1705 }
1706
1707 /*
1708  * Check whether the flow rule is supported by hns3.
1709  * This only checks the format; it does not guarantee that the rule can be
1710  * programmed into the HW, since there may not be enough room for it.
1711  */
1712 static int
1713 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1714                    const struct rte_flow_item pattern[],
1715                    const struct rte_flow_action actions[],
1716                    struct rte_flow_error *error)
1717 {
1718         struct hns3_fdir_rule fdir_rule;
1719         int ret;
1720
1721         ret = hns3_flow_args_check(attr, pattern, actions, error);
1722         if (ret)
1723                 return ret;
1724
1725         if (hns3_find_rss_general_action(pattern, actions))
1726                 return hns3_parse_rss_filter(dev, actions, error);
1727
1728         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1729         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1730 }
1731
1732 /*
1733  * Create or destroy a flow rule.
1734  * Theoretically one rule can match more than one filter.
1735  * We let it use the first filter it hits,
1736  * so the sequence matters.
1737  */
1738 static struct rte_flow *
1739 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1740                  const struct rte_flow_item pattern[],
1741                  const struct rte_flow_action actions[],
1742                  struct rte_flow_error *error)
1743 {
1744         struct hns3_process_private *process_list = dev->process_private;
1745         struct hns3_adapter *hns = dev->data->dev_private;
1746         struct hns3_hw *hw = &hns->hw;
1747         const struct hns3_rss_conf *rss_conf;
1748         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1749         struct hns3_rss_conf_ele *rss_filter_ptr;
1750         struct hns3_flow_mem *flow_node;
1751         const struct rte_flow_action *act;
1752         struct rte_flow *flow;
1753         struct hns3_fdir_rule fdir_rule;
1754         int ret;
1755
1756         ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1757         if (ret)
1758                 return NULL;
1759
1760         flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1761         if (flow == NULL) {
1762                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1763                                    NULL, "Failed to allocate flow memory");
1764                 return NULL;
1765         }
1766         flow_node = rte_zmalloc("hns3 flow node",
1767                                 sizeof(struct hns3_flow_mem), 0);
1768         if (flow_node == NULL) {
1769                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1770                                    NULL, "Failed to allocate flow list memory");
1771                 rte_free(flow);
1772                 return NULL;
1773         }
1774
1775         flow_node->flow = flow;
1776         TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
1777
1778         act = hns3_find_rss_general_action(pattern, actions);
1779         if (act) {
1780                 rss_conf = act->conf;
1781
1782                 ret = hns3_flow_parse_rss(dev, rss_conf, true);
1783                 if (ret)
1784                         goto err;
1785
1786                 rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1787                                              sizeof(struct hns3_rss_conf_ele),
1788                                              0);
1789                 if (rss_filter_ptr == NULL) {
1790                         hns3_err(hw,
1791                                  "Failed to allocate hns3_rss_filter memory");
1792                         ret = -ENOMEM;
1793                         goto err;
1794                 }
1795                 hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
1796                                    &rss_conf->conf);
1797                 rss_filter_ptr->filter_info.valid = true;
1798                 TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
1799                                   rss_filter_ptr, entries);
1800
1801                 flow->rule = rss_filter_ptr;
1802                 flow->filter_type = RTE_ETH_FILTER_HASH;
1803                 return flow;
1804         }
1805
1806         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1807         ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1808         if (ret)
1809                 goto out;
1810
1811         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1812                 ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
1813                                        fdir_rule.act_cnt.id, error);
1814                 if (ret)
1815                         goto out;
1816
1817                 flow->counter_id = fdir_rule.act_cnt.id;
1818         }
1819
1820         fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1821                                     sizeof(struct hns3_fdir_rule_ele),
1822                                     0);
1823         if (fdir_rule_ptr == NULL) {
1824                 hns3_err(hw, "failed to allocate fdir_rule memory.");
1825                 ret = -ENOMEM;
1826                 goto err_fdir;
1827         }
1828
1829         ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1830         if (!ret) {
1831                 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1832                         sizeof(struct hns3_fdir_rule));
1833                 TAILQ_INSERT_TAIL(&process_list->fdir_list,
1834                                   fdir_rule_ptr, entries);
1835                 flow->rule = fdir_rule_ptr;
1836                 flow->filter_type = RTE_ETH_FILTER_FDIR;
1837
1838                 return flow;
1839         }
1840
1841         rte_free(fdir_rule_ptr);
1842 err_fdir:
1843         if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1844                 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1845 err:
1846         rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1847                            "Failed to create flow");
1848 out:
1849         TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1850         rte_free(flow_node);
1851         rte_free(flow);
1852         return NULL;
1853 }
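
/*
 * Example only: an application-side sketch (not part of this driver)
 * creating an FDIR rule that carries a COUNT action, which exercises
 * the hns3_counter_new() path above. The port id, counter id, queue
 * index and pattern are assumptions; the hypothetical
 * HNS3_FLOW_DOC_EXAMPLE guard keeps the sketch compiled out.
 */
#ifdef HNS3_FLOW_DOC_EXAMPLE
static struct rte_flow *
example_create_counted_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	static const struct rte_flow_action_count cnt = { .id = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* dispatches to hns3_flow_create() through hns3_flow_ops */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif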
1854
1855 /* Destroy a flow rule on hns3. */
1856 static int
1857 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1858                   struct rte_flow_error *error)
1859 {
1860         struct hns3_process_private *process_list = dev->process_private;
1861         struct hns3_adapter *hns = dev->data->dev_private;
1862         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1863         struct hns3_rss_conf_ele *rss_filter_ptr;
1864         struct hns3_flow_mem *flow_node;
1865         enum rte_filter_type filter_type;
1866         struct hns3_fdir_rule fdir_rule;
1867         int ret;
1868
1869         if (flow == NULL)
1870                 return rte_flow_error_set(error, EINVAL,
1871                                           RTE_FLOW_ERROR_TYPE_HANDLE,
1872                                           flow, "Flow is NULL");
1873
1874         filter_type = flow->filter_type;
1875         switch (filter_type) {
1876         case RTE_ETH_FILTER_FDIR:
1877                 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1878                 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1879                            sizeof(struct hns3_fdir_rule));
1880
1881                 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1882                 if (ret)
1883                         return rte_flow_error_set(error, EIO,
1884                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1885                                                   flow,
1886                                                   "Destroy FDIR fail. Try again");
1887                 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1888                         hns3_counter_release(dev, fdir_rule.act_cnt.id);
1889                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1890                 rte_free(fdir_rule_ptr);
1891                 fdir_rule_ptr = NULL;
1892                 break;
1893         case RTE_ETH_FILTER_HASH:
1894                 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1895                 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1896                                              false);
1897                 if (ret)
1898                         return rte_flow_error_set(error, EIO,
1899                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
1900                                                   flow,
1901                                                   "Destroy RSS fail. Try again");
1902                 TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
1903                              entries);
1904                 rte_free(rss_filter_ptr);
1905                 rss_filter_ptr = NULL;
1906                 break;
1907         default:
1908                 return rte_flow_error_set(error, EINVAL,
1909                                           RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1910                                           "Unsupported filter type");
1911         }
1912
1913         TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
1914                 if (flow_node->flow == flow) {
1915                         TAILQ_REMOVE(&process_list->flow_list, flow_node,
1916                                      entries);
1917                         rte_free(flow_node);
1918                         flow_node = NULL;
1919                         break;
1920                 }
1921         }
1922         rte_free(flow);
1923         flow = NULL;
1924
1925         return 0;
1926 }
1927
1928 /* Destroy all flow rules associated with a port on hns3. */
1929 static int
1930 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1931 {
1932         struct hns3_adapter *hns = dev->data->dev_private;
1933         int ret;
1934
1935         /* FDIR is available only in PF driver */
1936         if (!hns->is_vf) {
1937                 ret = hns3_clear_all_fdir_filter(hns);
1938                 if (ret) {
1939                         rte_flow_error_set(error, ret,
1940                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1941                                            NULL, "Failed to flush rule");
1942                         return ret;
1943                 }
1944                 hns3_counter_flush(dev);
1945         }
1946
1947         ret = hns3_clear_rss_filter(dev);
1948         if (ret) {
1949                 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1950                                    NULL, "Failed to flush rss filter");
1951                 return ret;
1952         }
1953
1954         hns3_filterlist_flush(dev);
1955
1956         return 0;
1957 }
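
/*
 * Usage note (example only): an application-level
 * rte_flow_flush(port_id, &error) call lands in the function above and
 * removes both FDIR and RSS rules, flushing FDIR counters on a PF.
 */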
1958
1959 /* Query an existing flow rule. */
1960 static int
1961 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1962                 const struct rte_flow_action *actions, void *data,
1963                 struct rte_flow_error *error)
1964 {
1965         struct rte_flow_action_rss *rss_conf;
1966         struct hns3_rss_conf_ele *rss_rule;
1967         struct rte_flow_query_count *qc;
1968         int ret;
1969
1970         if (!flow->rule)
1971                 return rte_flow_error_set(error, EINVAL,
1972                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
1973
1974         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1975                 switch (actions->type) {
1976                 case RTE_FLOW_ACTION_TYPE_VOID:
1977                         break;
1978                 case RTE_FLOW_ACTION_TYPE_COUNT:
1979                         qc = (struct rte_flow_query_count *)data;
1980                         ret = hns3_counter_query(dev, flow, qc, error);
1981                         if (ret)
1982                                 return ret;
1983                         break;
1984                 case RTE_FLOW_ACTION_TYPE_RSS:
1985                         if (flow->filter_type != RTE_ETH_FILTER_HASH) {
1986                                 return rte_flow_error_set(error, ENOTSUP,
1987                                         RTE_FLOW_ERROR_TYPE_ACTION,
1988                                         actions, "action is not supported");
1989                         }
1990                         rss_conf = (struct rte_flow_action_rss *)data;
1991                         rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
1992                         rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
1993                                    sizeof(struct rte_flow_action_rss));
1994                         break;
1995                 default:
1996                         return rte_flow_error_set(error, ENOTSUP,
1997                                 RTE_FLOW_ERROR_TYPE_ACTION,
1998                                 actions, "action is not supported");
1999                 }
2000         }
2001
2002         return 0;
2003 }
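
/*
 * Example only: a minimal application-side sketch reading back the
 * counter whose value this patch fixes. The port id and flow handle
 * come from elsewhere; the hypothetical HNS3_FLOW_DOC_EXAMPLE guard
 * keeps the sketch compiled out.
 */
#ifdef HNS3_FLOW_DOC_EXAMPLE
static int
example_query_flow_counter(uint16_t port_id, struct rte_flow *flow,
			   uint64_t *hits)
{
	struct rte_flow_query_count cnt = { .reset = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	int ret;

	/* dispatches to hns3_flow_query() through hns3_flow_ops */
	ret = rte_flow_query(port_id, flow, actions, &cnt, &err);
	if (ret == 0 && cnt.hits_set)
		*hits = cnt.hits;
	return ret;
}
#endif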
2004
2005 static const struct rte_flow_ops hns3_flow_ops = {
2006         .validate = hns3_flow_validate,
2007         .create = hns3_flow_create,
2008         .destroy = hns3_flow_destroy,
2009         .flush = hns3_flow_flush,
2010         .query = hns3_flow_query,
2011         .isolate = NULL,
2012 };
2013
2014 int
2015 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2016                       const struct rte_flow_ops **ops)
2017 {
2018         struct hns3_hw *hw;
2019
2020         hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021         if (hw->adapter_state >= HNS3_NIC_CLOSED)
2022                 return -ENODEV;
2023
2024         *ops = &hns3_flow_ops;
2025         return 0;
2026 }