net/hns3: support flow director
drivers/net/hns3/hns3_flow.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4
5 #include <stdbool.h>
6 #include <sys/queue.h>
7 #include <rte_flow_driver.h>
8 #include <rte_io.h>
9 #include <rte_malloc.h>
10
11 #include "hns3_ethdev.h"
12 #include "hns3_logs.h"
13
14 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
15 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
16
17 /* Special Filter id for non-specific packet flagging. Don't change value */
18 #define HNS3_MAX_FILTER_ID      0x0FFF
19
20 #define ETHER_TYPE_MASK         0xFFFF
21 #define IPPROTO_MASK            0xFF
22 #define TUNNEL_TYPE_MASK        0xFFFF
23
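/*
 * Tunnel types as encoded in the FD key: VXLAN/VXLAN-GPE/GENEVE are
 * identified by their well-known destination UDP ports (0x12B5 = 4789,
 * 0x12B6 = 4790, 0x17C1 = 6081), while NVGRE uses the Transparent
 * Ethernet Bridging protocol value (0x6558) carried in the GRE header.
 */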
24 #define HNS3_TUNNEL_TYPE_VXLAN          0x12B5
25 #define HNS3_TUNNEL_TYPE_VXLAN_GPE      0x12B6
26 #define HNS3_TUNNEL_TYPE_GENEVE         0x17C1
27 #define HNS3_TUNNEL_TYPE_NVGRE          0x6558
28
29 static enum rte_flow_item_type first_items[] = {
30         RTE_FLOW_ITEM_TYPE_ETH,
31         RTE_FLOW_ITEM_TYPE_IPV4,
32         RTE_FLOW_ITEM_TYPE_IPV6,
33         RTE_FLOW_ITEM_TYPE_TCP,
34         RTE_FLOW_ITEM_TYPE_UDP,
35         RTE_FLOW_ITEM_TYPE_SCTP,
36         RTE_FLOW_ITEM_TYPE_ICMP,
37         RTE_FLOW_ITEM_TYPE_NVGRE,
38         RTE_FLOW_ITEM_TYPE_VXLAN,
39         RTE_FLOW_ITEM_TYPE_GENEVE,
40         RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
41         RTE_FLOW_ITEM_TYPE_MPLS
42 };
43
44 static enum rte_flow_item_type L2_next_items[] = {
45         RTE_FLOW_ITEM_TYPE_VLAN,
46         RTE_FLOW_ITEM_TYPE_IPV4,
47         RTE_FLOW_ITEM_TYPE_IPV6
48 };
49
50 static enum rte_flow_item_type L3_next_items[] = {
51         RTE_FLOW_ITEM_TYPE_TCP,
52         RTE_FLOW_ITEM_TYPE_UDP,
53         RTE_FLOW_ITEM_TYPE_SCTP,
54         RTE_FLOW_ITEM_TYPE_NVGRE,
55         RTE_FLOW_ITEM_TYPE_ICMP
56 };
57
58 static enum rte_flow_item_type L4_next_items[] = {
59         RTE_FLOW_ITEM_TYPE_VXLAN,
60         RTE_FLOW_ITEM_TYPE_GENEVE,
61         RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
62         RTE_FLOW_ITEM_TYPE_MPLS
63 };
64
65 static enum rte_flow_item_type tunnel_next_items[] = {
66         RTE_FLOW_ITEM_TYPE_ETH,
67         RTE_FLOW_ITEM_TYPE_VLAN
68 };
69
70 struct items_step_mngr {
71         enum rte_flow_item_type *items;
72         int count;
73 };
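
/*
 * The pattern walk below is a small state machine: after each item is
 * parsed, the step manager is loaded with the set of item types that may
 * legally follow it. E.g. after RTE_FLOW_ITEM_TYPE_ETH only the types in
 * L2_next_items (VLAN/IPV4/IPV6) are accepted as the next non-void item.
 */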
74
75 static inline void
76 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
77 {
78         size_t i;
79
80         for (i = 0; i < len; i++)
81                 dst[i] = rte_be_to_cpu_32(src[i]);
82 }
83
84 static inline struct hns3_flow_counter *
85 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
86 {
87         struct hns3_adapter *hns = dev->data->dev_private;
88         struct hns3_pf *pf = &hns->pf;
89         struct hns3_flow_counter *cnt;
90
91         LIST_FOREACH(cnt, &pf->flow_counters, next) {
92                 if (cnt->id == id)
93                         return cnt;
94         }
95         return NULL;
96 }
97
98 static int
99 hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
100                  struct rte_flow_error *error)
101 {
102         struct hns3_adapter *hns = dev->data->dev_private;
103         struct hns3_pf *pf = &hns->pf;
104         struct hns3_flow_counter *cnt;
105
106         cnt = hns3_counter_lookup(dev, id);
107         if (cnt) {
108                 if (!cnt->shared || cnt->shared != shared)
109                         return rte_flow_error_set(error, ENOTSUP,
110                                                   RTE_FLOW_ERROR_TYPE_ACTION,
111                                                   cnt,
112                                                   "Counter id is used, shared flag does not match");
113                 cnt->ref_cnt++;
114                 return 0;
115         }
116
117         cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
118         if (cnt == NULL)
119                 return rte_flow_error_set(error, ENOMEM,
120                                           RTE_FLOW_ERROR_TYPE_ACTION, cnt,
121                                           "Alloc mem for counter failed");
122         cnt->id = id;
123         cnt->shared = shared;
124         cnt->ref_cnt = 1;
125         cnt->hits = 0;
126         LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
127         return 0;
128 }
129
130 static int
131 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
132                    struct rte_flow_query_count *qc,
133                    struct rte_flow_error *error)
134 {
135         struct hns3_adapter *hns = dev->data->dev_private;
136         struct hns3_flow_counter *cnt;
137         uint64_t value;
138         int ret;
139
140         /* FDIR is available only in PF driver */
141         if (hns->is_vf)
142                 return rte_flow_error_set(error, ENOTSUP,
143                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
144                                           "Fdir is not supported in VF");
145         cnt = hns3_counter_lookup(dev, flow->counter_id);
146         if (cnt == NULL)
147                 return rte_flow_error_set(error, EINVAL,
148                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
149                                           "Can't find counter id");
150
151         ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
152         if (ret) {
153                 rte_flow_error_set(error, -ret,
154                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
155                                    NULL, "Failed to read counter.");
156                 return ret;
157         }
158         qc->hits_set = 1;
159         qc->hits = value;
160
161         return 0;
162 }
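
/*
 * Illustrative application-side usage (a sketch, not part of the driver;
 * port_id and flow are the application's handles): the counter programmed
 * by a COUNT action is read back through the generic rte_flow_query()
 * API, which ends up here.
 *
 *     struct rte_flow_query_count qc = { .reset = 0 };
 *     struct rte_flow_error err;
 *     const struct rte_flow_action count = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *     };
 *
 *     if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0 &&
 *         qc.hits_set)
 *             printf("hits: %" PRIu64 "\n", qc.hits);
 */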
163
164 static int
165 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
166 {
167         struct hns3_adapter *hns = dev->data->dev_private;
168         struct hns3_hw *hw = &hns->hw;
169         struct hns3_flow_counter *cnt;
170
171         cnt = hns3_counter_lookup(dev, id);
172         if (cnt == NULL) {
173                 hns3_err(hw, "Can't find available counter to release");
174                 return -EINVAL;
175         }
176         cnt->ref_cnt--;
177         if (cnt->ref_cnt == 0) {
178                 LIST_REMOVE(cnt, next);
179                 rte_free(cnt);
180         }
181         return 0;
182 }
183
184 static void
185 hns3_counter_flush(struct rte_eth_dev *dev)
186 {
187         struct hns3_adapter *hns = dev->data->dev_private;
188         struct hns3_pf *pf = &hns->pf;
189         struct hns3_flow_counter *cnt_ptr;
190
191         cnt_ptr = LIST_FIRST(&pf->flow_counters);
192         while (cnt_ptr) {
193                 LIST_REMOVE(cnt_ptr, next);
194                 rte_free(cnt_ptr);
195                 cnt_ptr = LIST_FIRST(&pf->flow_counters);
196         }
197 }
198
199 static int
200 hns3_handle_action_queue(struct rte_eth_dev *dev,
201                          const struct rte_flow_action *action,
202                          struct hns3_fdir_rule *rule,
203                          struct rte_flow_error *error)
204 {
205         struct hns3_adapter *hns = dev->data->dev_private;
206         struct hns3_hw *hw = &hns->hw;
207         const struct rte_flow_action_queue *queue;
208
209         queue = (const struct rte_flow_action_queue *)action->conf;
210         if (queue->index >= hw->data->nb_rx_queues)
211                 return rte_flow_error_set(error, EINVAL,
212                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
213                                           "Invalid queue ID in PF");
214         rule->queue_id = queue->index;
215         rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
216         return 0;
217 }
218
219 /*
220  * Parse the action list of the flow rule.
221  * The actions are validated as they are copied into the NIC-specific rule.
222  *
223  * @param actions[in]
224  * @param rule[out]
225  *   NIC specific actions derived from the actions.
226  * @param error[out]
227  */
228 static int
229 hns3_handle_actions(struct rte_eth_dev *dev,
230                     const struct rte_flow_action actions[],
231                     struct hns3_fdir_rule *rule, struct rte_flow_error *error)
232 {
233         struct hns3_adapter *hns = dev->data->dev_private;
234         const struct rte_flow_action_count *act_count;
235         const struct rte_flow_action_mark *mark;
236         struct hns3_pf *pf = &hns->pf;
237         uint32_t counter_num;
238         int ret;
239
240         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
241                 switch (actions->type) {
242                 case RTE_FLOW_ACTION_TYPE_QUEUE:
243                         ret = hns3_handle_action_queue(dev, actions, rule,
244                                                        error);
245                         if (ret)
246                                 return ret;
247                         break;
248                 case RTE_FLOW_ACTION_TYPE_DROP:
249                         rule->action = HNS3_FD_ACTION_DROP_PACKET;
250                         break;
251                 case RTE_FLOW_ACTION_TYPE_MARK:
252                         mark =
253                             (const struct rte_flow_action_mark *)actions->conf;
254                         if (mark->id >= HNS3_MAX_FILTER_ID)
255                                 return rte_flow_error_set(error, EINVAL,
256                                                      RTE_FLOW_ERROR_TYPE_ACTION,
257                                                      actions,
258                                                      "Invalid Mark ID");
259                         rule->fd_id = mark->id;
260                         rule->flags |= HNS3_RULE_FLAG_FDID;
261                         break;
262                 case RTE_FLOW_ACTION_TYPE_FLAG:
263                         rule->fd_id = HNS3_MAX_FILTER_ID;
264                         rule->flags |= HNS3_RULE_FLAG_FDID;
265                         break;
266                 case RTE_FLOW_ACTION_TYPE_COUNT:
267                         act_count =
268                             (const struct rte_flow_action_count *)actions->conf;
269                         counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
270                         if (act_count->id >= counter_num)
271                                 return rte_flow_error_set(error, EINVAL,
272                                                      RTE_FLOW_ERROR_TYPE_ACTION,
273                                                      actions,
274                                                      "Invalid counter id");
275                         rule->act_cnt = *act_count;
276                         rule->flags |= HNS3_RULE_FLAG_COUNTER;
277                         break;
278                 case RTE_FLOW_ACTION_TYPE_VOID:
279                         break;
280                 default:
281                         return rte_flow_error_set(error, ENOTSUP,
282                                                   RTE_FLOW_ERROR_TYPE_ACTION,
283                                                   NULL, "Unsupported action");
284                 }
285         }
286
287         return 0;
288 }
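
/*
 * Illustrative action list this parser accepts (a sketch; the queue,
 * mark and counter ids are arbitrary but must pass the bounds checks
 * above):
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *               .conf = &(struct rte_flow_action_queue){ .index = 1 } },
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK,
 *               .conf = &(struct rte_flow_action_mark){ .id = 0x123 } },
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *               .conf = &(struct rte_flow_action_count){ .id = 0 } },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */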
289
290 /* Check the attributes of a flow director rule. */
291 static int
292 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
293 {
294         if (!attr->ingress)
295                 return rte_flow_error_set(error, EINVAL,
296                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
297                                           attr, "Ingress can't be zero");
298         if (attr->egress)
299                 return rte_flow_error_set(error, ENOTSUP,
300                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
301                                           attr, "Egress is not supported");
302         if (attr->transfer)
303                 return rte_flow_error_set(error, ENOTSUP,
304                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
305                                           attr, "Transfer is not supported");
306         if (attr->priority)
307                 return rte_flow_error_set(error, ENOTSUP,
308                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
309                                           attr, "Priority is not supported");
310         if (attr->group)
311                 return rte_flow_error_set(error, ENOTSUP,
312                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
313                                           attr, "Group is not supported");
314         return 0;
315 }
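
/*
 * Illustrative: the only attribute combination that passes the checks
 * above is a plain ingress rule, e.g.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 */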
316
317 static int
318 hns3_parse_eth(const struct rte_flow_item *item,
319                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
320 {
321         const struct rte_flow_item_eth *eth_spec;
322         const struct rte_flow_item_eth *eth_mask;
323
324         if (item->spec == NULL && item->mask)
325                 return rte_flow_error_set(error, EINVAL,
326                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
327                                           "Can't configure FDIR with mask but without spec");
328
329         /* Only used to describe the protocol stack. */
330         if (item->spec == NULL && item->mask == NULL)
331                 return 0;
332
333         if (item->mask) {
334                 eth_mask = item->mask;
335                 if (eth_mask->type) {
336                         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
337                         rule->key_conf.mask.ether_type =
338                             rte_be_to_cpu_16(eth_mask->type);
339                 }
340                 if (!rte_is_zero_ether_addr(&eth_mask->src)) {
341                         hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
342                         memcpy(rule->key_conf.mask.src_mac,
343                                eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
344                 }
345                 if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
346                         hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
347                         memcpy(rule->key_conf.mask.dst_mac,
348                                eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
349                 }
350         }
351
352         eth_spec = item->spec;
353         rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
354         memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
355                RTE_ETHER_ADDR_LEN);
356         memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
357                RTE_ETHER_ADDR_LEN);
358         return 0;
359 }
360
361 static int
362 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
363                 struct rte_flow_error *error)
364 {
365         const struct rte_flow_item_vlan *vlan_spec;
366         const struct rte_flow_item_vlan *vlan_mask;
367
368         if (item->spec == NULL && item->mask)
369                 return rte_flow_error_set(error, EINVAL,
370                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
371                                           "Can't configure FDIR with mask but without spec");
372
373         rule->key_conf.vlan_num++;
374         if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
375                 return rte_flow_error_set(error, EINVAL,
376                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
377                                           "More than 2 VLAN tags are not supported");
378
379         /* Only used to describe the protocol stack. */
380         if (item->spec == NULL && item->mask == NULL)
381                 return 0;
382
383         if (item->mask) {
384                 vlan_mask = item->mask;
385                 if (vlan_mask->tci) {
386                         if (rule->key_conf.vlan_num == 1) {
387                                 hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
388                                              1);
389                                 rule->key_conf.mask.vlan_tag1 =
390                                     rte_be_to_cpu_16(vlan_mask->tci);
391                         } else {
392                                 hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
393                                              1);
394                                 rule->key_conf.mask.vlan_tag2 =
395                                     rte_be_to_cpu_16(vlan_mask->tci);
396                         }
397                 }
398         }
399
400         vlan_spec = item->spec;
401         if (rule->key_conf.vlan_num == 1)
402                 rule->key_conf.spec.vlan_tag1 =
403                     rte_be_to_cpu_16(vlan_spec->tci);
404         else
405                 rule->key_conf.spec.vlan_tag2 =
406                     rte_be_to_cpu_16(vlan_spec->tci);
407         return 0;
408 }
409
410 static int
411 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
412                 struct rte_flow_error *error)
413 {
414         const struct rte_flow_item_ipv4 *ipv4_spec;
415         const struct rte_flow_item_ipv4 *ipv4_mask;
416
417         if (item->spec == NULL && item->mask)
418                 return rte_flow_error_set(error, EINVAL,
419                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
420                                           "Can't configure FDIR with mask but without spec");
421
422         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
423         rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
424         rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
425         /* Only used to describe the protocol stack. */
426         if (item->spec == NULL && item->mask == NULL)
427                 return 0;
428
429         if (item->mask) {
430                 ipv4_mask = item->mask;
431
432                 if (ipv4_mask->hdr.total_length ||
433                     ipv4_mask->hdr.packet_id ||
434                     ipv4_mask->hdr.fragment_offset ||
435                     ipv4_mask->hdr.time_to_live ||
436                     ipv4_mask->hdr.hdr_checksum) {
437                         return rte_flow_error_set(error, EINVAL,
438                                                   RTE_FLOW_ERROR_TYPE_ITEM,
439                                                   item,
440                                                   "Only support src & dst ip, tos, proto in IPV4");
441                 }
442
443                 if (ipv4_mask->hdr.src_addr) {
444                         hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
445                         rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
446                             rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
447                 }
448
449                 if (ipv4_mask->hdr.dst_addr) {
450                         hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
451                         rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
452                             rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
453                 }
454
455                 if (ipv4_mask->hdr.type_of_service) {
456                         hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
457                         rule->key_conf.mask.ip_tos =
458                             ipv4_mask->hdr.type_of_service;
459                 }
460
461                 if (ipv4_mask->hdr.next_proto_id) {
462                         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
463                         rule->key_conf.mask.ip_proto =
464                             ipv4_mask->hdr.next_proto_id;
465                 }
466         }
467
468         ipv4_spec = item->spec;
469         rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
470             rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
471         rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
472             rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
473         rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
474         rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
475         return 0;
476 }
477
478 static int
479 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
480                 struct rte_flow_error *error)
481 {
482         const struct rte_flow_item_ipv6 *ipv6_spec;
483         const struct rte_flow_item_ipv6 *ipv6_mask;
484
485         if (item->spec == NULL && item->mask)
486                 return rte_flow_error_set(error, EINVAL,
487                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
488                                           "Can't configure FDIR with mask but without spec");
489
490         hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
491         rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
492         rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
493
494         /* Only used to describe the protocol stack. */
495         if (item->spec == NULL && item->mask == NULL)
496                 return 0;
497
498         if (item->mask) {
499                 ipv6_mask = item->mask;
500                 if (ipv6_mask->hdr.vtc_flow ||
501                     ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
502                         return rte_flow_error_set(error, EINVAL,
503                                                   RTE_FLOW_ERROR_TYPE_ITEM,
504                                                   item,
505                                                   "Only support src & dst ip, proto in IPV6");
506                 }
507                 net_addr_to_host(rule->key_conf.mask.src_ip,
508                                  (const rte_be32_t *)ipv6_mask->hdr.src_addr,
509                                  IP_ADDR_LEN);
510                 net_addr_to_host(rule->key_conf.mask.dst_ip,
511                                  (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
512                                  IP_ADDR_LEN);
513                 rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
514                 if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
515                         hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
516                 if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
517                         hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
518                 if (ipv6_mask->hdr.proto)
519                         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
520         }
521
522         ipv6_spec = item->spec;
523         net_addr_to_host(rule->key_conf.spec.src_ip,
524                          (const rte_be32_t *)ipv6_spec->hdr.src_addr,
525                          IP_ADDR_LEN);
526         net_addr_to_host(rule->key_conf.spec.dst_ip,
527                          (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
528                          IP_ADDR_LEN);
529         rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
530
531         return 0;
532 }
533
534 static int
535 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
536                struct rte_flow_error *error)
537 {
538         const struct rte_flow_item_tcp *tcp_spec;
539         const struct rte_flow_item_tcp *tcp_mask;
540
541         if (item->spec == NULL && item->mask)
542                 return rte_flow_error_set(error, EINVAL,
543                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
544                                           "Can't configure FDIR with mask but without spec");
545
546         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
547         rule->key_conf.spec.ip_proto = IPPROTO_TCP;
548         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
549
550         /* Only used to describe the protocol stack. */
551         if (item->spec == NULL && item->mask == NULL)
552                 return 0;
553
554         if (item->mask) {
555                 tcp_mask = item->mask;
556                 if (tcp_mask->hdr.sent_seq ||
557                     tcp_mask->hdr.recv_ack ||
558                     tcp_mask->hdr.data_off ||
559                     tcp_mask->hdr.tcp_flags ||
560                     tcp_mask->hdr.rx_win ||
561                     tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
562                         return rte_flow_error_set(error, EINVAL,
563                                                   RTE_FLOW_ERROR_TYPE_ITEM,
564                                                   item,
565                                                   "Only support src & dst port in TCP");
566                 }
567
568                 if (tcp_mask->hdr.src_port) {
569                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
570                         rule->key_conf.mask.src_port =
571                             rte_be_to_cpu_16(tcp_mask->hdr.src_port);
572                 }
573                 if (tcp_mask->hdr.dst_port) {
574                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
575                         rule->key_conf.mask.dst_port =
576                             rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
577                 }
578         }
579
580         tcp_spec = item->spec;
581         rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
582         rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
583
584         return 0;
585 }
586
587 static int
588 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
589                struct rte_flow_error *error)
590 {
591         const struct rte_flow_item_udp *udp_spec;
592         const struct rte_flow_item_udp *udp_mask;
593
594         if (item->spec == NULL && item->mask)
595                 return rte_flow_error_set(error, EINVAL,
596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
597                                           "Can't configure FDIR with mask but without spec");
598
599         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
600         rule->key_conf.spec.ip_proto = IPPROTO_UDP;
601         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
602         /* Only used to describe the protocol stack. */
603         if (item->spec == NULL && item->mask == NULL)
604                 return 0;
605
606         if (item->mask) {
607                 udp_mask = item->mask;
608                 if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
609                         return rte_flow_error_set(error, EINVAL,
610                                                   RTE_FLOW_ERROR_TYPE_ITEM,
611                                                   item,
612                                                   "Only support src & dst port in UDP");
613                 }
614                 if (udp_mask->hdr.src_port) {
615                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
616                         rule->key_conf.mask.src_port =
617                             rte_be_to_cpu_16(udp_mask->hdr.src_port);
618                 }
619                 if (udp_mask->hdr.dst_port) {
620                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
621                         rule->key_conf.mask.dst_port =
622                             rte_be_to_cpu_16(udp_mask->hdr.dst_port);
623                 }
624         }
625
626         udp_spec = item->spec;
627         rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
628         rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
629
630         return 0;
631 }
632
633 static int
634 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
635                 struct rte_flow_error *error)
636 {
637         const struct rte_flow_item_sctp *sctp_spec;
638         const struct rte_flow_item_sctp *sctp_mask;
639
640         if (item->spec == NULL && item->mask)
641                 return rte_flow_error_set(error, EINVAL,
642                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
643                                           "Can't configure FDIR with mask but without spec");
644
645         hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
646         rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
647         rule->key_conf.mask.ip_proto = IPPROTO_MASK;
648
649         /* Only used to describe the protocol stack. */
650         if (item->spec == NULL && item->mask == NULL)
651                 return 0;
652
653         if (item->mask) {
654                 sctp_mask = item->mask;
655                 if (sctp_mask->hdr.cksum)
656                         return rte_flow_error_set(error, EINVAL,
657                                                   RTE_FLOW_ERROR_TYPE_ITEM,
658                                                   item,
659                                                   "Only support src & dst port in SCTP");
660
661                 if (sctp_mask->hdr.src_port) {
662                         hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
663                         rule->key_conf.mask.src_port =
664                             rte_be_to_cpu_16(sctp_mask->hdr.src_port);
665                 }
666                 if (sctp_mask->hdr.dst_port) {
667                         hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
668                         rule->key_conf.mask.dst_port =
669                             rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
670                 }
671                 if (sctp_mask->hdr.tag) {
672                         hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
673                         rule->key_conf.mask.sctp_tag =
674                             rte_be_to_cpu_32(sctp_mask->hdr.tag);
675                 }
676         }
677
678         sctp_spec = item->spec;
679         rule->key_conf.spec.src_port =
680             rte_be_to_cpu_16(sctp_spec->hdr.src_port);
681         rule->key_conf.spec.dst_port =
682             rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
683         rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
684
685         return 0;
686 }
687
688 /*
689  * Check items before tunnel, save inner configs to outer configs, and clear
690  * inner configs.
691  * The key consists of two parts: meta_data and tuple keys.
692  * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
693  * packet(1bit).
694  * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
695  * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
696  * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
697  * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
698  * vlantag2(16bit) and sctp-tag(32bit).
699  */
700 static int
701 hns3_handle_tunnel(const struct rte_flow_item *item,
702                    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
703 {
704         /* check eth config */
705         if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
706                 return rte_flow_error_set(error, EINVAL,
707                                           RTE_FLOW_ERROR_TYPE_ITEM,
708                                           item, "Outer eth mac is unsupported");
709         if (rule->input_set & BIT(INNER_ETH_TYPE)) {
710                 hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
711                 rule->key_conf.spec.outer_ether_type =
712                     rule->key_conf.spec.ether_type;
713                 rule->key_conf.mask.outer_ether_type =
714                     rule->key_conf.mask.ether_type;
715                 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
716                 rule->key_conf.spec.ether_type = 0;
717                 rule->key_conf.mask.ether_type = 0;
718         }
719
720         /* check vlan config */
721         if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
722                 return rte_flow_error_set(error, EINVAL,
723                                           RTE_FLOW_ERROR_TYPE_ITEM,
724                                           item,
725                                           "Outer vlan tags are unsupported");
726
727         /* clear vlan_num for inner vlan select */
728         rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
729         rule->key_conf.vlan_num = 0;
730
731         /* check L3 config */
732         if (rule->input_set &
733             (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
734                 return rte_flow_error_set(error, EINVAL,
735                                           RTE_FLOW_ERROR_TYPE_ITEM,
736                                           item, "Outer ip is unsupported");
737         if (rule->input_set & BIT(INNER_IP_PROTO)) {
738                 hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
739                 rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
740                 rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
741                 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
742                 rule->key_conf.spec.ip_proto = 0;
743                 rule->key_conf.mask.ip_proto = 0;
744         }
745
746         /* check L4 config */
747         if (rule->input_set & BIT(INNER_SCTP_TAG))
748                 return rte_flow_error_set(error, EINVAL,
749                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
750                                           "Outer sctp tag is unsupported");
751
752         if (rule->input_set & BIT(INNER_SRC_PORT)) {
753                 hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
754                 rule->key_conf.spec.outer_src_port =
755                     rule->key_conf.spec.src_port;
756                 rule->key_conf.mask.outer_src_port =
757                     rule->key_conf.mask.src_port;
758                 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
759                 rule->key_conf.spec.src_port = 0;
760                 rule->key_conf.mask.src_port = 0;
761         }
762         if (rule->input_set & BIT(INNER_DST_PORT)) {
763                 hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
764                 rule->key_conf.spec.dst_port = 0;
765                 rule->key_conf.mask.dst_port = 0;
766         }
767         return 0;
768 }
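
/*
 * Illustrative: for a pattern such as eth / ipv4 / udp / vxlan / eth /
 * ipv4, the fields parsed before the VXLAN item are remapped above --
 * the ether type and IP proto move to their outer_* counterparts, the
 * UDP source port moves to outer_src_port, and the UDP destination port
 * is simply cleared because the tunnel type already identifies it.
 */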
769
770 static int
771 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
772                  struct rte_flow_error *error)
773 {
774         const struct rte_flow_item_vxlan *vxlan_spec;
775         const struct rte_flow_item_vxlan *vxlan_mask;
776
777         if (item->spec == NULL && item->mask)
778                 return rte_flow_error_set(error, EINVAL,
779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
780                                           "Can't configure FDIR with mask but without spec");
781         else if (item->spec && (item->mask == NULL))
782                 return rte_flow_error_set(error, EINVAL,
783                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
784                                           "Tunnel packets must configure with mask");
785
786         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
787         rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
788         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
789                 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
790         else
791                 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
792
793         /* Only used to describe the protocol stack. */
794         if (item->spec == NULL && item->mask == NULL)
795                 return 0;
796
797         vxlan_mask = item->mask;
798         vxlan_spec = item->spec;
799
800         if (vxlan_mask->flags)
801                 return rte_flow_error_set(error, EINVAL,
802                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
803                                           "Flags is not supported in VxLAN");
804
805         /* VNI must be totally masked or not. */
806         if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
807             memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
808                 return rte_flow_error_set(error, EINVAL,
809                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
810                                           "VNI must be totally masked or not in VxLAN");
811         if (vxlan_mask->vni[0]) {
812                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
813                 memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
814                            VNI_OR_TNI_LEN);
815         }
816         memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
817                    VNI_OR_TNI_LEN);
818         return 0;
819 }
820
821 static int
822 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
823                  struct rte_flow_error *error)
824 {
825         const struct rte_flow_item_nvgre *nvgre_spec;
826         const struct rte_flow_item_nvgre *nvgre_mask;
827
828         if (item->spec == NULL && item->mask)
829                 return rte_flow_error_set(error, EINVAL,
830                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
831                                           "Can't configure FDIR with mask but without spec");
832         else if (item->spec && (item->mask == NULL))
833                 return rte_flow_error_set(error, EINVAL,
834                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
835                                           "Tunnel packets must configure with mask");
836
837         hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
838         rule->key_conf.spec.outer_proto = IPPROTO_GRE;
839         rule->key_conf.mask.outer_proto = IPPROTO_MASK;
840
841         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
842         rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
843         rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
844         /* Only used to describe the protocol stack. */
845         if (item->spec == NULL && item->mask == NULL)
846                 return 0;
847
848         nvgre_mask = item->mask;
849         nvgre_spec = item->spec;
850
851         if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
852                 return rte_flow_error_set(error, EINVAL,
853                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
854                                           "Ver/protocol is not supported in NVGRE");
855
856         /* TNI must be totally masked or not. */
857         if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
858             memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
859                 return rte_flow_error_set(error, EINVAL,
860                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
861                                           "TNI must be totally masked or not in NVGRE");
862
863         if (nvgre_mask->tni[0]) {
864                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
865                 memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
866                            VNI_OR_TNI_LEN);
867         }
868         memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
869                    VNI_OR_TNI_LEN);
870
871         if (nvgre_mask->flow_id) {
872                 hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
873                 rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
874         }
875         rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
876         return 0;
877 }
878
879 static int
880 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
881                   struct rte_flow_error *error)
882 {
883         const struct rte_flow_item_geneve *geneve_spec;
884         const struct rte_flow_item_geneve *geneve_mask;
885
886         if (item->spec == NULL && item->mask)
887                 return rte_flow_error_set(error, EINVAL,
888                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
889                                           "Can't configure FDIR with mask but without spec");
890         else if (item->spec && (item->mask == NULL))
891                 return rte_flow_error_set(error, EINVAL,
892                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
893                                           "Tunnel packets must configure with mask");
894
895         hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
896         rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
897         rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
898         /* Only used to describe the protocol stack. */
899         if (item->spec == NULL && item->mask == NULL)
900                 return 0;
901
902         geneve_mask = item->mask;
903         geneve_spec = item->spec;
904
905         if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
906                 return rte_flow_error_set(error, EINVAL,
907                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
908                                           "Ver/protocol is not supported in GENEVE");
909         /* VNI must be totally masked or not. */
910         if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
911             memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
912                 return rte_flow_error_set(error, EINVAL,
913                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
914                                           "VNI must be totally masked or not in GENEVE");
915         if (geneve_mask->vni[0]) {
916                 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
917                 memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
918                            VNI_OR_TNI_LEN);
919         }
920         memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
921                    VNI_OR_TNI_LEN);
922         return 0;
923 }
924
925 static int
926 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
927                   struct rte_flow_error *error)
928 {
929         int ret;
930
931         switch (item->type) {
932         case RTE_FLOW_ITEM_TYPE_VXLAN:
933         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
934                 ret = hns3_parse_vxlan(item, rule, error);
935                 break;
936         case RTE_FLOW_ITEM_TYPE_NVGRE:
937                 ret = hns3_parse_nvgre(item, rule, error);
938                 break;
939         case RTE_FLOW_ITEM_TYPE_GENEVE:
940                 ret = hns3_parse_geneve(item, rule, error);
941                 break;
942         default:
943                 return rte_flow_error_set(error, ENOTSUP,
944                                           RTE_FLOW_ERROR_TYPE_HANDLE,
945                                           NULL, "Unsupported tunnel type!");
946         }
947         if (ret)
948                 return ret;
949         return hns3_handle_tunnel(item, rule, error);
950 }
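
/*
 * Illustrative tunnel item (a sketch): matching VXLAN VNI 100 requires a
 * fully-masked VNI, per the checks in hns3_parse_vxlan():
 *
 *     struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0, 0, 100 } };
 *     struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec,
 *             .mask = &vxlan_mask,
 *     };
 */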
951
952 static int
953 hns3_parse_normal(const struct rte_flow_item *item,
954                   struct hns3_fdir_rule *rule,
955                   struct items_step_mngr *step_mngr,
956                   struct rte_flow_error *error)
957 {
958         int ret;
959
960         switch (item->type) {
961         case RTE_FLOW_ITEM_TYPE_ETH:
962                 ret = hns3_parse_eth(item, rule, error);
963                 step_mngr->items = L2_next_items;
964                 step_mngr->count = ARRAY_SIZE(L2_next_items);
965                 break;
966         case RTE_FLOW_ITEM_TYPE_VLAN:
967                 ret = hns3_parse_vlan(item, rule, error);
968                 step_mngr->items = L2_next_items;
969                 step_mngr->count = ARRAY_SIZE(L2_next_items);
970                 break;
971         case RTE_FLOW_ITEM_TYPE_IPV4:
972                 ret = hns3_parse_ipv4(item, rule, error);
973                 step_mngr->items = L3_next_items;
974                 step_mngr->count = ARRAY_SIZE(L3_next_items);
975                 break;
976         case RTE_FLOW_ITEM_TYPE_IPV6:
977                 ret = hns3_parse_ipv6(item, rule, error);
978                 step_mngr->items = L3_next_items;
979                 step_mngr->count = ARRAY_SIZE(L3_next_items);
980                 break;
981         case RTE_FLOW_ITEM_TYPE_TCP:
982                 ret = hns3_parse_tcp(item, rule, error);
983                 step_mngr->items = L4_next_items;
984                 step_mngr->count = ARRAY_SIZE(L4_next_items);
985                 break;
986         case RTE_FLOW_ITEM_TYPE_UDP:
987                 ret = hns3_parse_udp(item, rule, error);
988                 step_mngr->items = L4_next_items;
989                 step_mngr->count = ARRAY_SIZE(L4_next_items);
990                 break;
991         case RTE_FLOW_ITEM_TYPE_SCTP:
992                 ret = hns3_parse_sctp(item, rule, error);
993                 step_mngr->items = L4_next_items;
994                 step_mngr->count = ARRAY_SIZE(L4_next_items);
995                 break;
996         default:
997                 return rte_flow_error_set(error, ENOTSUP,
998                                           RTE_FLOW_ERROR_TYPE_HANDLE,
999                                           NULL, "Unsupported normal type!");
1000         }
1001
1002         return ret;
1003 }
1004
1005 static int
1006 hns3_validate_item(const struct rte_flow_item *item,
1007                    struct items_step_mngr step_mngr,
1008                    struct rte_flow_error *error)
1009 {
1010         int i;
1011
1012         if (item->last)
1013                 return rte_flow_error_set(error, ENOTSUP,
1014                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
1015                                           "Not supported last point for range");
1016
1017         for (i = 0; i < step_mngr.count; i++) {
1018                 if (item->type == step_mngr.items[i])
1019                         break;
1020         }
1021
1022         if (i == step_mngr.count) {
1023                 return rte_flow_error_set(error, EINVAL,
1024                                           RTE_FLOW_ERROR_TYPE_ITEM,
1025                                           item, "Invalid or missing item");
1026         }
1027         return 0;
1028 }
1029
1030 static inline bool
1031 is_tunnel_packet(enum rte_flow_item_type type)
1032 {
1033         if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1034             type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1035             type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1036             type == RTE_FLOW_ITEM_TYPE_GENEVE ||
1037             type == RTE_FLOW_ITEM_TYPE_MPLS)
1038                 return true;
1039         return false;
1040 }
1041
1042 /*
1043  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1044  * and get the flow director filter info as well.
1045  * UDP/TCP/SCTP PATTERN:
1046  * The first not void item can be ETH or IPV4 or IPV6
1047  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1048  * The next not void item could be UDP or TCP or SCTP (optional)
1049  * The next not void item could be RAW (for flexbyte, optional)
1050  * The next not void item must be END.
1051  * A Fuzzy Match pattern can appear at any place before END.
1052  * Fuzzy Match is optional for IPV4 but is required for IPV6
1053  * MAC VLAN PATTERN:
1054  * The first not void item must be ETH.
1055  * The second not void item must be MAC VLAN.
1056  * The next not void item must be END.
1057  * ACTION:
1058  * The first not void action should be QUEUE or DROP.
1059  * The second not void optional action should be MARK,
1060  * mark_id is a uint32_t number.
1061  * The next not void action should be END.
1062  * UDP/TCP/SCTP pattern example:
1063  * ITEM         Spec                    Mask
1064  * ETH          NULL                    NULL
1065  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1066  *              dst_addr 192.167.3.50   0xFFFFFFFF
1067  * UDP/TCP/SCTP src_port        80      0xFFFF
1068  *              dst_port        80      0xFFFF
1069  * END
1070  * MAC VLAN pattern example:
1071  * ITEM         Spec                    Mask
1072  * ETH          dst_addr
1073  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1074  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1075  * MAC VLAN     tci     0x2016          0xEFFF
1076  * END
1077  * Other members in mask and spec should be set to 0x00.
1078  * Item->last should be NULL.
1079  */
1080 static int
1081 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1082                        const struct rte_flow_item pattern[],
1083                        const struct rte_flow_action actions[],
1084                        struct hns3_fdir_rule *rule,
1085                        struct rte_flow_error *error)
1086 {
1087         struct hns3_adapter *hns = dev->data->dev_private;
1088         const struct rte_flow_item *item;
1089         struct items_step_mngr step_mngr;
1090         int ret;
1091
1092         /* FDIR is available only in PF driver */
1093         if (hns->is_vf)
1094                 return rte_flow_error_set(error, ENOTSUP,
1095                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1096                                           "Fdir not supported in VF");
1097
1098         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
1099                 return rte_flow_error_set(error, ENOTSUP,
1100                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1101                                           "fdir_conf.mode must be RTE_FDIR_MODE_PERFECT");
1102
1103         step_mngr.items = first_items;
1104         step_mngr.count = ARRAY_SIZE(first_items);
1105         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1106                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1107                         continue;
1108
1109                 ret = hns3_validate_item(item, step_mngr, error);
1110                 if (ret)
1111                         return ret;
1112
1113                 if (is_tunnel_packet(item->type)) {
1114                         ret = hns3_parse_tunnel(item, rule, error);
1115                         if (ret)
1116                                 return ret;
1117                         step_mngr.items = tunnel_next_items;
1118                         step_mngr.count = ARRAY_SIZE(tunnel_next_items);
1119                 } else {
1120                         ret = hns3_parse_normal(item, rule, &step_mngr, error);
1121                         if (ret)
1122                                 return ret;
1123                 }
1124         }
1125
1126         return hns3_handle_actions(dev, actions, rule, error);
1127 }
1128
1129 void
1130 hns3_filterlist_init(struct rte_eth_dev *dev)
1131 {
1132         struct hns3_process_private *process_list = dev->process_private;
1133
1134         TAILQ_INIT(&process_list->fdir_list);
1135         TAILQ_INIT(&process_list->flow_list);
1136 }
1137
1138 static void
1139 hns3_filterlist_flush(struct rte_eth_dev *dev)
1140 {
1141         struct hns3_process_private *process_list = dev->process_private;
1142         struct hns3_fdir_rule_ele *fdir_rule_ptr;
1143         struct hns3_flow_mem *flow_node;
1144
1145         fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1146         while (fdir_rule_ptr) {
1147                 TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
1148                 rte_free(fdir_rule_ptr);
1149                 fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
1150         }
1151
1152         flow_node = TAILQ_FIRST(&process_list->flow_list);
1153         while (flow_node) {
1154                 TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
1155                 rte_free(flow_node->flow);
1156                 rte_free(flow_node);
1157                 flow_node = TAILQ_FIRST(&process_list->flow_list);
1158         }
1159 }
1160
1161 static int
1162 hns3_flow_args_check(const struct rte_flow_attr *attr,
1163                      const struct rte_flow_item pattern[],
1164                      const struct rte_flow_action actions[],
1165                      struct rte_flow_error *error)
1166 {
1167         if (pattern == NULL)
1168                 return rte_flow_error_set(error, EINVAL,
1169                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1170                                           NULL, "NULL pattern.");
1171
1172         if (actions == NULL)
1173                 return rte_flow_error_set(error, EINVAL,
1174                                           RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1175                                           NULL, "NULL action.");
1176
1177         if (attr == NULL)
1178                 return rte_flow_error_set(error, EINVAL,
1179                                           RTE_FLOW_ERROR_TYPE_ATTR,
1180                                           NULL, "NULL attribute.");
1181
1182         return hns3_check_attr(attr, error);
1183 }
1184
1185 /*
1186  * Check if the flow rule is supported by hns3.
1187  * It only checks the format. It doesn't guarantee that the rule can be
1188  * programmed into the HW, because there may not be enough room for it.
1189  */
1190 static int
1191 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1192                    const struct rte_flow_item pattern[],
1193                    const struct rte_flow_action actions[],
1194                    struct rte_flow_error *error)
1195 {
1196         struct hns3_fdir_rule fdir_rule;
1197         int ret;
1198
1199         ret = hns3_flow_args_check(attr, pattern, actions, error);
1200         if (ret)
1201                 return ret;
1202
1203         memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1204         return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1205 }
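
/*
 * Illustrative application-side sequence (a sketch; attr, pattern and
 * actions built as in the examples above):
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *flow;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */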
1206
1207 /*
1208  * Create or destroy a flow rule.
1209  * Theoretically one rule can match more than one filter.
1210  * We will let it use the filter which it hits first.
1211  * So, the sequence matters.
1212  */
static struct rte_flow *
hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_flow_mem *flow_node;
	struct rte_flow *flow;
	struct hns3_fdir_rule fdir_rule;
	int ret;

	ret = hns3_flow_args_check(attr, pattern, actions, error);
	if (ret)
		return NULL;

	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate flow memory");
		return NULL;
	}
	flow_node = rte_zmalloc("hns3 flow node",
				sizeof(struct hns3_flow_mem), 0);
	if (flow_node == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate flow list memory");
		rte_free(flow);
		return NULL;
	}

	flow_node->flow = flow;
	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);

	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
	if (ret)
		goto out;

	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
				       fdir_rule.act_cnt.id, error);
		if (ret)
			goto out;

		flow->counter_id = fdir_rule.act_cnt.id;
	}
	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
	if (!ret) {
		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
					    sizeof(struct hns3_fdir_rule_ele),
					    0);
		if (fdir_rule_ptr == NULL) {
			hns3_err(hw, "Failed to allocate fdir_rule memory");
			/* Roll back the filter already written to the HW */
			(void)hns3_fdir_filter_program(hns, &fdir_rule, true);
			ret = -ENOMEM;
			goto err_fdir;
		}
		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
		       sizeof(struct hns3_fdir_rule));
		TAILQ_INSERT_TAIL(&process_list->fdir_list,
				  fdir_rule_ptr, entries);
		flow->rule = fdir_rule_ptr;
		flow->filter_type = RTE_ETH_FILTER_FDIR;

		return flow;
	}

err_fdir:
	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
		hns3_counter_release(dev, fdir_rule.act_cnt.id);

	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow");
out:
	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
	rte_free(flow_node);
	rte_free(flow);
	return NULL;
}
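
/*
 * Editorial sketch, not driver code: a minimal rule that steers IPv4/UDP
 * packets to an Rx queue and attaches a counter, exercising the FDIR and
 * counter paths above. The port id, queue index and counter id are
 * arbitrary placeholders:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action_count count = { .shared = 0, .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow create failed: %s\n", err.message);
 */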

/* Destroy a flow rule on hns3. */
static int
hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_flow_mem *flow_node;
	enum rte_filter_type filter_type;
	struct hns3_fdir_rule fdir_rule;
	int ret;

	if (flow == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "Flow is NULL");
	filter_type = flow->filter_type;
	switch (filter_type) {
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
		       sizeof(struct hns3_fdir_rule));

		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
		if (ret)
			return rte_flow_error_set(error, EIO,
						  RTE_FLOW_ERROR_TYPE_HANDLE,
						  flow,
						  "Failed to destroy FDIR rule, try again");
		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
			hns3_counter_release(dev, fdir_rule.act_cnt.id);
		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
		break;
	default:
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
					  "Unsupported filter type");
	}

	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
		if (flow_node->flow == flow) {
			TAILQ_REMOVE(&process_list->flow_list, flow_node,
				     entries);
			rte_free(flow_node);
			break;
		}
	}
	rte_free(flow);

	return 0;
}
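
/*
 * Editorial sketch, not driver code: tearing down a single rule created
 * earlier; "flow" is the handle returned by rte_flow_create():
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_destroy(port_id, flow, &err) != 0)
 *		printf("flow destroy failed: %s\n", err.message);
 */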

/* Destroy all flow rules associated with a port on hns3. */
static int
hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	int ret;

	/* FDIR is available only in PF driver */
	if (!hns->is_vf) {
		ret = hns3_clear_all_fdir_filter(hns);
		if (ret) {
			rte_flow_error_set(error, ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "Failed to flush rule");
			return ret;
		}
		hns3_counter_flush(dev);
	}

	hns3_filterlist_flush(dev);

	return 0;
}
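
/*
 * Editorial sketch, not driver code: dropping every rule on a port in one
 * call, e.g. before stopping the device:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flow flush failed: %s\n", err.message);
 */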

/* Query an existing flow rule. */
static int
hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		const struct rte_flow_action *actions, void *data,
		struct rte_flow_error *error)
{
	struct rte_flow_query_count *qc;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			qc = (struct rte_flow_query_count *)data;
			ret = hns3_counter_query(dev, flow, qc, error);
			if (ret)
				return ret;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "Query action only supports count");
		}
	}
	return 0;
}
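
/*
 * Editorial sketch, not driver code: reading (and resetting) the counter
 * attached to a rule. Only the COUNT action can be queried here; the END
 * entry terminates the action list the loop above walks:
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action query[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, query, &qc, &err) == 0 &&
 *	    qc.hits_set)
 *		printf("hits: %" PRIu64 "\n", qc.hits);
 */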

static const struct rte_flow_ops hns3_flow_ops = {
	.validate = hns3_flow_validate,
	.create = hns3_flow_create,
	.destroy = hns3_flow_destroy,
	.flush = hns3_flow_flush,
	.query = hns3_flow_query,
	.isolate = NULL,
};

/*
 * The entry point of the flow API.
 * @param dev
 *   Pointer to Ethernet device.
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op, void *arg)
{
	struct hns3_hw *hw;
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;
	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		if (hw->adapter_state >= HNS3_NIC_CLOSED)
			return -ENODEV;
		*(const void **)arg = &hns3_flow_ops;
		break;
	default:
		hns3_err(hw, "Filter type (%d) not supported", filter_type);
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
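
/*
 * Editorial sketch, not driver code: roughly how the generic flow API finds
 * the ops table above. The ethdev layer issues a filter_ctrl request with
 * RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET, and the driver hands back
 * &hns3_flow_ops through the arg pointer:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *				      RTE_ETH_FILTER_GET, &ops) == 0 && ops)
 *		ops->create(dev, &attr, pattern, actions, &err);
 */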