/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

/**
 * An endless loop cannot occur given the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}
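
/*
 * Illustrative usage of the two iterators above (a sketch mirroring how
 * the parsers below walk a pattern): pass NULL to get the first non-void
 * entry, then pass the previous entry to advance until END is reached.
 *
 *   const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *   while (it->type != RTE_FLOW_ITEM_TYPE_END)
 *           it = next_no_void_pattern(pattern, it);
 */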

static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
                                const struct rte_flow_item *pattern,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
                                        struct rte_flow_error *error)
{
        /* The first non-void item should be MAC */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }
        return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
                        const struct rte_flow_action *act,
                        const struct rte_flow_action_queue *act_q,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        /* Parse action */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
                act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule,
 * and get the ethertype filter info as well.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item *pattern,
                        const struct rte_flow_action *actions,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act = NULL;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        item = next_no_void_pattern(pattern, NULL);
        if (hinic_check_ethertype_first_item(item, error))
                return -rte_errno;

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /*
         * Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /*
         * If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
                return -rte_errno;

        if (hinic_check_ethertype_attr_ele(attr, error))
                return -rte_errno;

        return 0;
}

static int
hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
                return -rte_errno;

        /* NIC doesn't support MAC address. */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index much too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Drop option is unsupported");
                return -rte_errno;
        }

        /* Hinic only supports LACP/ARP ether types */
        if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
                filter->ether_type != RTE_ETHER_TYPE_ARP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "only lacp/arp type supported by ethertype filter");
                return -rte_errno;
        }

        return 0;
}
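
/*
 * Example of a rule the ethertype parser above accepts (illustrative,
 * testpmd flow syntax; port and queue index are assumptions, and the queue
 * must be below the configured Rx queue count):
 *
 *   flow create 0 ingress pattern eth type is 0x0806 / end \
 *        actions queue index 1 / end
 *
 * Only ARP (0x0806) and slow protocols/LACP (0x8809) pass the checks;
 * DROP actions and destination MAC matching are rejected.
 */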

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* ICMP all should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an n-tuple rule,
 * and get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}

static int
hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support TCP flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic only supports n-tuple priorities in [1, 7] */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}
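
/*
 * Example of a rule the n-tuple parser above accepts (illustrative, testpmd
 * flow syntax; addresses, ports, and queue index are assumptions). Note the
 * IPv4 protocol field must be matched, and TCP flag matching is rejected:
 *
 *   flow create 0 ingress pattern eth / \
 *        ipv4 src is 192.168.1.20 dst is 192.167.3.50 proto is 6 / \
 *        tcp src is 25 dst is 80 / end actions queue index 3 / end
 */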

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 or TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter, support mac, ipv4, tcp, udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support mac, ipv4");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src, dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP or UDP or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, end");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                /* Get TCP/UDP info */
                if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.tcp_flags ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support tcp");
                                return -rte_errno;
                        }

                        rule->mask.src_port_mask = tcp_mask->hdr.src_port;
                        rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

                        if (item->spec) {
                                tcp_spec =
                                        (const struct rte_flow_item_tcp *)
                                        item->spec;
                                rule->hinic_fdir.src_port =
                                        tcp_spec->hdr.src_port;
                                rule->hinic_fdir.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }

                } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support udp");
                                return -rte_errno;
                        }
                        rule->mask.src_port_mask = udp_mask->hdr.src_port;
                        rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                        if (item->spec) {
                                udp_spec =
                                        (const struct rte_flow_item_udp *)
                                        item->spec;
                                rule->hinic_fdir.src_port =
                                        udp_spec->hdr.src_port;
                                rule->hinic_fdir.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                } else {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                /* Get next no void item */
                *in_out_item = next_no_void_pattern(pattern, item);
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}

static int
hinic_check_normal_act_ele(__rte_unused const struct rte_flow_item *item,
                                const struct rte_flow_action actions[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action *act;

        /* Check if the first not void action is QUEUE */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
 * and get the flow director filter info as well.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP (optional)
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 1.2.3.6        0xFFFFFFFF
 *              dst_addr 1.2.3.5        0xFFFFFFFF
 * UDP/TCP      src_port 80             0xFFFF
 *              dst_port 80             0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
                               const struct rte_flow_item pattern[],
                               const struct rte_flow_action actions[],
                               struct hinic_fdir_rule *rule,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_normal_item_ele(item, pattern, rule, error))
                return -rte_errno;

        if (hinic_check_normal_attr_ele(attr, rule, error))
                return -rte_errno;

        if (hinic_check_normal_act_ele(item, actions, rule, error))
                return -rte_errno;

        return 0;
}

static int
hinic_parse_fdir_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        int ret;

        ret = hinic_parse_fdir_filter_normal(attr, pattern,
                                                actions, rule, error);
        if (ret)
                return ret;

        if (rule->queue >= dev->data->nb_rx_queues)
                return -ENOTSUP;

        return ret;
}
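
/*
 * Example of a rule taken by the flow director path above (illustrative,
 * testpmd flow syntax; addresses, ports, and queue index are assumptions).
 * Unlike the n-tuple parser, spec pointers may be omitted here, but IPv4
 * and TCP/UDP items must carry a mask:
 *
 *   flow create 0 ingress pattern eth / \
 *        ipv4 src is 1.2.3.6 dst is 1.2.3.5 / \
 *        udp src is 80 dst is 80 / end actions queue index 2 / end
 */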

/**
 * Check if the flow rule is supported by the NIC.
 * It only checks the format and doesn't guarantee the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int hinic_flow_validate(struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_flow_error *error)
{
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct hinic_fdir_rule fdir_rule;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = hinic_parse_ntuple_filter(dev, attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = hinic_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);

        if (!ret)
                return 0;

        memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
        ret = hinic_parse_fdir_filter(dev, attr, pattern,
                                actions, &fdir_rule, error);

        return ret;
}
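
/*
 * The parsers are tried in order (n-tuple, then ethertype, then fdir); the
 * first one that accepts the rule wins. An illustrative dry run in testpmd
 * (port 0 assumed) exercises this function without touching hardware:
 *
 *   flow validate 0 ingress pattern eth type is 0x8809 / end \
 *        actions queue index 1 / end
 */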

static inline int
ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
                 struct hinic_5tuple_filter_info *filter_info)
{
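        /*
         * Note the inverted mask convention of filter_info (inferred from
         * its use in this file): *_mask == 0 means the field is compared
         * exactly, while *_mask == 1 means the field is wildcarded.
         */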
1223         switch (filter->dst_ip_mask) {
1224         case UINT32_MAX:
1225                 filter_info->dst_ip_mask = 0;
1226                 filter_info->dst_ip = filter->dst_ip;
1227                 break;
1228         case 0:
1229                 filter_info->dst_ip_mask = 1;
1230                 filter_info->dst_ip = 0;
1231                 break;
1232         default:
1233                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1234                 return -EINVAL;
1235         }
1236
1237         switch (filter->src_ip_mask) {
1238         case UINT32_MAX:
1239                 filter_info->src_ip_mask = 0;
1240                 filter_info->src_ip = filter->src_ip;
1241                 break;
1242         case 0:
1243                 filter_info->src_ip_mask = 1;
1244                 filter_info->src_ip = 0;
1245                 break;
1246         default:
1247                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1248                 return -EINVAL;
1249         }
1250         return 0;
1251 }
1252
1253 static inline int
1254 ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1255                    struct hinic_5tuple_filter_info *filter_info)
1256 {
1257         switch (filter->dst_port_mask) {
1258         case UINT16_MAX:
1259                 filter_info->dst_port_mask = 0;
1260                 filter_info->dst_port = filter->dst_port;
1261                 break;
1262         case 0:
1263                 filter_info->dst_port_mask = 1;
1264                 filter_info->dst_port = 0;
1265                 break;
1266         default:
1267                 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1268                 return -EINVAL;
1269         }
1270
1271         switch (filter->src_port_mask) {
1272         case UINT16_MAX:
1273                 filter_info->src_port_mask = 0;
1274                 filter_info->src_port = filter->src_port;
1275                 break;
1276         case 0:
1277                 filter_info->src_port_mask = 1;
1278                 filter_info->src_port = 0;
1279                 break;
1280         default:
1281                 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1282                 return -EINVAL;
1283         }
1284
1285         return 0;
1286 }
1287
1288 static inline int
1289 ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1290                     struct hinic_5tuple_filter_info *filter_info)
1291 {
1292         switch (filter->proto_mask) {
1293         case UINT8_MAX:
1294                 filter_info->proto_mask = 0;
1295                 filter_info->proto = filter->proto;
1296                 break;
1297         case 0:
1298                 filter_info->proto_mask = 1;
1299                 filter_info->proto = 0;
1300                 break;
1301         default:
1302                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1303                 return -EINVAL;
1304         }
1305
1306         return 0;
1307 }
1308
static inline int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
			struct hinic_5tuple_filter_info *filter_info)
{
	if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
	    filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
	    filter->priority < HINIC_MIN_N_TUPLE_PRIO)
		return -EINVAL;

	if (ntuple_ip_filter(filter, filter_info) ||
	    ntuple_port_filter(filter, filter_info) ||
	    ntuple_proto_filter(filter, filter_info))
		return -EINVAL;

	filter_info->priority = (uint8_t)filter->priority;
	return 0;
}

static inline struct hinic_5tuple_filter *
hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
			   struct hinic_5tuple_filter_info *key)
{
	struct hinic_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct hinic_5tuple_filter_info)) == 0) {
			return it;
		}
	}

	return NULL;
}

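/*
 * The helpers below program static parser (PA) TCAM entries. Each entry
 * pairs a match rule (Ethernet type, IP protocol and, for BGP, a TCP port)
 * with an action that tags matching packets with a hardware packet type;
 * per the inline comments, the 0x3f/0x7/0xf action fields mean "keep the
 * value produced by the ipsu parser, do not convert".
 */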
static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule bgp_rule;
	struct tag_pa_action bgp_action;

	memset(&bgp_rule, 0, sizeof(bgp_rule));
	memset(&bgp_action, 0, sizeof(bgp_action));
	/* BGP TCAM rule */
	bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPv4 */
	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
	bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;

	/* BGP TCAM action */
	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
	bgp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
	bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the
			       * ipsu parse result, no need to convert
			       */
	bgp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev,
			TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
}

static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule bgp_rule;
	struct tag_pa_action bgp_action;

	memset(&bgp_rule, 0, sizeof(bgp_rule));
	memset(&bgp_action, 0, sizeof(bgp_action));
	/* BGP TCAM rule */
	bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID; /* BGP port is 179 */
	bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;

	/* BGP TCAM action */
	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
	bgp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp_sport: 84 */
	bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the
			       * ipsu parse result, no need to convert
			       */
	bgp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
					&bgp_rule, &bgp_action);
}

static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule vrrp_rule;
	struct tag_pa_action vrrp_action;

	memset(&vrrp_rule, 0, sizeof(vrrp_rule));
	memset(&vrrp_action, 0, sizeof(vrrp_action));
	/* VRRP TCAM rule */
	vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
	vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	vrrp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;

	/* VRRP TCAM action */
	vrrp_action.err_type = 0x3f;
	vrrp_action.fwd_action = 0x7;
	vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
	vrrp_action.pri = 0xf;
	vrrp_action.push_len = 0xf;

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
					&vrrp_rule, &vrrp_action);
}

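/*
 * Map a 5-tuple filter to the hardware packet type it will be steered by.
 * For TCP only BGP (port 179 as exact-match source or destination) is
 * supported; VRRP, ICMPv4 and ICMPv6 map directly to their packet types.
 */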
static int
hinic_filter_info_init(struct hinic_5tuple_filter *filter,
		       struct hinic_filter_info *filter_info)
{
	switch (filter->filter_info.proto) {
	case IPPROTO_TCP:
		/* Filter type is BGP if dst_port or src_port is 179 */
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
		    !(filter->filter_info.dst_port_mask)) {
			filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
		} else if (filter->filter_info.src_port ==
			RTE_BE16(BGP_DPORT_ID) &&
			!(filter->filter_info.src_port_mask)) {
			filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
		} else {
			PMD_DRV_LOG(INFO, "For TCP, 5-tuple filters only "
				"support BGP now, proto: 0x%x, "
				"dst_port: 0x%x, dst_port_mask: 0x%x, "
				"src_port: 0x%x, src_port_mask: 0x%x.",
				filter->filter_info.proto,
				filter->filter_info.dst_port,
				filter->filter_info.dst_port_mask,
				filter->filter_info.src_port,
				filter->filter_info.src_port_mask);
			return -EINVAL;
		}
		break;

	case IPPROTO_VRRP:
		filter_info->pkt_type = PKT_VRRP_TYPE;
		break;

	case IPPROTO_ICMP:
		filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
		break;

	case IPPROTO_ICMPV6:
		filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
		break;

	default:
		PMD_DRV_LOG(ERR, "5-tuple filters only support BGP/VRRP/ICMP "
			"now, proto: 0x%x, dst_port: 0x%x, "
			"dst_port_mask: 0x%x, src_port: 0x%x, "
			"src_port_mask: 0x%x.",
			filter->filter_info.proto,
			filter->filter_info.dst_port,
			filter->filter_info.dst_port_mask,
			filter->filter_info.src_port,
			filter->filter_info.src_port_mask);
		return -EINVAL;
	}

	return 0;
}

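/*
 * Claim the per-packet-type slot for a new filter: each hardware packet
 * type owns one bit in type_mask and one entry in pkt_filters[], and the
 * lookup fails if a filter of the same type is already installed.
 */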
static int
hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
			struct hinic_filter_info *filter_info,
			int *index)
{
	int type_id;

	type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);

	if (type_id > HINIC_MAX_Q_FILTERS - 1) {
		PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter types.");
		return -EINVAL;
	}

	if (!(filter_info->type_mask & (1 << type_id))) {
		filter_info->type_mask |= 1 << type_id;
		filter->index = type_id;
		filter_info->pkt_filters[type_id].enable = true;
		filter_info->pkt_filters[type_id].pkt_proto =
						filter->filter_info.proto;
		TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
				  filter, entries);
	} else {
		PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
		return -EIO;
	}

	*index = type_id;
	return 0;
}

/*
 * Add a 5-tuple filter
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param filter
 *  Pointer to the filter that will be added.
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
hinic_add_5tuple_filter(struct rte_eth_dev *dev,
			struct hinic_5tuple_filter *filter)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i, ret_fw;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	if (hinic_filter_info_init(filter, filter_info) ||
	    hinic_lookup_new_filter(filter, filter_info, &i))
		return -EFAULT;

	ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
					filter_info->qid,
					filter_info->pkt_filters[i].enable,
					true);
	if (ret_fw) {
		PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
			filter_info->pkt_type, filter->queue,
			filter_info->pkt_filters[i].enable);
		return -EFAULT;
	}

	PMD_DRV_LOG(INFO, "Add 5-tuple filter succeeded, type: 0x%x, qid: 0x%x, enable: 0x%x",
			filter_info->pkt_type, filter_info->qid,
			filter_info->pkt_filters[filter->index].enable);

	switch (filter->filter_info.proto) {
	case IPPROTO_TCP:
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
			ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
			if (ret_fw) {
				PMD_DRV_LOG(ERR, "Set BGP dport TCAM failed, "
					"type: 0x%x, qid: 0x%x, enable: 0x%x",
					filter_info->pkt_type, filter->queue,
					filter_info->pkt_filters[i].enable);
				return -EFAULT;
			}

			PMD_DRV_LOG(INFO, "Set BGP dport TCAM succeeded, qid: 0x%x, enable: 0x%x",
				filter->queue,
				filter_info->pkt_filters[i].enable);
		} else if (filter->filter_info.src_port ==
			RTE_BE16(BGP_DPORT_ID)) {
			ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
			if (ret_fw) {
				PMD_DRV_LOG(ERR, "Set BGP sport TCAM failed, "
					"type: 0x%x, qid: 0x%x, enable: 0x%x",
					filter_info->pkt_type, filter->queue,
					filter_info->pkt_filters[i].enable);
				return -EFAULT;
			}

			PMD_DRV_LOG(INFO, "Set BGP sport TCAM succeeded, qid: 0x%x, enable: 0x%x",
					filter->queue,
					filter_info->pkt_filters[i].enable);
		}

		break;

	case IPPROTO_VRRP:
		ret_fw = hinic_set_vrrp_tcam(nic_dev);
		if (ret_fw) {
			PMD_DRV_LOG(ERR, "Set VRRP TCAM failed, "
				"type: 0x%x, qid: 0x%x, enable: 0x%x",
				filter_info->pkt_type, filter->queue,
				filter_info->pkt_filters[i].enable);
			return -EFAULT;
		}
		PMD_DRV_LOG(INFO, "Set VRRP TCAM succeeded, qid: 0x%x, enable: 0x%x",
				filter->queue,
				filter_info->pkt_filters[i].enable);
		break;

	default:
		break;
	}

	return 0;
}

/*
 * Remove a 5-tuple filter
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param filter
 *  Pointer to the filter to be removed.
 */
static void
hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
			   struct hinic_5tuple_filter *filter)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	switch (filter->filter_info.proto) {
	case IPPROTO_VRRP:
		(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
		break;

	case IPPROTO_TCP:
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
			(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
							TCAM_PKT_BGP_DPORT);
		else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
			(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
							TCAM_PKT_BGP_SPORT);
		break;

	default:
		break;
	}

	(void)hinic_filter_info_init(filter, filter_info);

	filter_info->pkt_filters[filter->index].enable = false;
	filter_info->pkt_filters[filter->index].pkt_proto = 0;

	PMD_DRV_LOG(INFO, "Del 5-tuple filter succeeded, type: 0x%x, qid: 0x%x, enable: 0x%x",
		filter_info->pkt_type,
		filter_info->pkt_filters[filter->index].qid,
		filter_info->pkt_filters[filter->index].enable);
	(void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
				filter_info->pkt_filters[filter->index].qid,
				filter_info->pkt_filters[filter->index].enable,
				true);

	filter_info->pkt_type = 0;
	filter_info->qid = 0;
	filter_info->pkt_filters[filter->index].qid = 0;
	filter_info->type_mask &= ~(1 << (filter->index));
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	rte_free(filter);
}

/*
 * Add or delete an ntuple filter
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param ntuple_filter
 *  Pointer to struct rte_eth_ntuple_filter
 * @param add
 *  If true, add filter; if false, remove filter
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter,
				bool add)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct hinic_5tuple_filter_info filter_5tuple;
	struct hinic_5tuple_filter *filter;
	int ret;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "Only 5-tuple is supported.");
		return -EINVAL;
	}

	memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
					 &filter_5tuple);
	if (filter != NULL && add) {
		PMD_DRV_LOG(ERR, "Filter exists.");
		return -EEXIST;
	}
	if (filter == NULL && !add) {
		PMD_DRV_LOG(ERR, "Filter doesn't exist.");
		return -ENOENT;
	}

	if (add) {
		filter = rte_zmalloc("hinic_5tuple_filter",
				sizeof(struct hinic_5tuple_filter), 0);
		if (filter == NULL)
			return -ENOMEM;
		rte_memcpy(&filter->filter_info, &filter_5tuple,
				sizeof(struct hinic_5tuple_filter_info));
		filter->queue = ntuple_filter->queue;

		filter_info->qid = ntuple_filter->queue;

		ret = hinic_add_5tuple_filter(dev, filter);
		if (ret)
			rte_free(filter);

		return ret;
	}

	hinic_remove_5tuple_filter(dev, filter);

	return 0;
}

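/*
 * Illustrative sketch only, not part of the driver: one way an application
 * might build a flow rule that this PMD accepts and maps to the BGP-dport
 * 5-tuple filter path above. The function name and the rx_queue parameter
 * are hypothetical; only the BGP port (179), the in-range priority and the
 * item/action layout come from this file.
 */
static __rte_unused struct rte_flow *
hinic_example_bgp_dport_flow(struct rte_eth_dev *dev, uint16_t rx_queue)
{
	/* Ingress rule with an in-range n-tuple priority (1..7) */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	/* Match TCP packets whose destination port is BGP (179), exactly */
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(BGP_DPORT_ID),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Steer matching packets to the given Rx queue */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(dev->data->port_id, &attr, pattern,
			       actions, &error);
}
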
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter.
 * We let it use the first filter it hits, so the sequence of parsing
 * attempts matters.
 */
static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_flow *flow = NULL;
	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
	struct hinic_flow_mem *hinic_flow_mem_ptr;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
		return NULL;
	}

	hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
			sizeof(struct hinic_flow_mem), 0);
	if (!hinic_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
		rte_free(flow);
		return NULL;
	}

	hinic_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
				entries);

	/* add ntuple filter */
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = hinic_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (ret)
		goto out;

	ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
	if (ret)
		goto out;
	ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
			sizeof(struct hinic_ntuple_filter_ele), 0);
	if (ntuple_filter_ptr == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ntuple_filter_ptr");
		/* roll back the filter just installed in hardware */
		(void)hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		ret = -ENOMEM;
		goto out;
	}
	rte_memcpy(&ntuple_filter_ptr->filter_info,
		   &ntuple_filter,
		   sizeof(struct rte_eth_ntuple_filter));
	TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
			  ntuple_filter_ptr, entries);
	flow->rule = ntuple_filter_ptr;
	flow->filter_type = RTE_ETH_FILTER_NTUPLE;

	PMD_DRV_LOG(INFO, "Create flow ntuple succeeded, func_id: 0x%x",
			hinic_global_func_id(nic_dev->hwdev));
	return flow;

out:
	TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(hinic_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}

/* Destroy a flow rule on hinic. */
static int hinic_flow_destroy(struct rte_eth_dev *dev,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
	struct hinic_flow_mem *hinic_flow_mem_ptr;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
		if (hinic_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&nic_dev->hinic_flow_list,
				hinic_flow_mem_ptr, entries);
			rte_free(hinic_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	PMD_DRV_LOG(INFO, "Destroy flow succeeded, func_id: 0x%x",
			hinic_global_func_id(nic_dev->hwdev));

	return ret;
}

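/*
 * Generic flow ops exported to the ethdev layer; validation, creation and
 * destruction all funnel through the ntuple/fdir parsers above.
 */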
const struct rte_flow_ops hinic_flow_ops = {
	.validate = hinic_flow_validate,
	.create = hinic_flow_create,
	.destroy = hinic_flow_destroy,
};