net/hinic: flush flow director filter
drivers/net/hinic/hinic_pmd_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

enum hinic_atr_flow_type {
        HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
        HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
        HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
        HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
};

/* Structure to store fdir's info. */
struct hinic_fdir_info {
        uint8_t fdir_flag;
        uint8_t qid;
        uint32_t fdir_key;
};

/**
 * An endless loop cannot occur here, under two assumptions:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}

static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only ingress is supported.");
                return -rte_errno;
        }

        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Egress is not supported.");
                return -rte_errno;
        }

        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Priority is not supported.");
                return -rte_errno;
        }

        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
                                const struct rte_flow_item *pattern,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
                                        struct rte_flow_error *error)
{
        /* The first non-void item should be MAC */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }
        return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
                        const struct rte_flow_action *act,
                        const struct rte_flow_action_queue *act_q,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        /* Parse action */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
                act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule, and fill in the
 * ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item *pattern,
                        const struct rte_flow_action *actions,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act = NULL;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        item = next_no_void_pattern(pattern, NULL);
        if (hinic_check_ethertype_first_item(item, error))
                return -rte_errno;

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /*
         * Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /*
         * If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
                return -rte_errno;

        if (hinic_check_ethertype_attr_ele(attr, error))
                return -rte_errno;

        return 0;
}
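
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * how an application could express the ethertype rule documented above --
 * an ETH item whose type is fully masked, followed by END, with a
 * QUEUE/END action list. The helper name and queue index are hypothetical.
 */
static __rte_unused int
hinic_example_build_ethertype_flow(uint16_t port_id,
                                struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec, eth_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        /* Match LACP (ether type 0x8809) with a full 16-bit mask */
        eth_spec.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
        eth_mask.type = rte_cpu_to_be_16(0xFFFF);

        /* Format check only; this ends up in hinic_flow_validate() below */
        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}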

static int
hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
                return -rte_errno;

        /* NIC doesn't support MAC address filtering */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Drop option is unsupported");
                return -rte_errno;
        }

        /* Hinic only supports LACP/ARP ether types */
        if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
                filter->ether_type != RTE_ETHER_TYPE_ARP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "Only LACP/ARP types supported by ethertype filter");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -rte_errno;
        }

        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* If the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* All ICMP fields should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware that there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets, the packets
 * normally use network order.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}
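
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * an application-side rule matching the n-tuple template above
 * (ETH -> IPV4 -> TCP -> END with a QUEUE action; spec values in big
 * endian). Addresses, ports and the queue index are hypothetical.
 */
static __rte_unused int
hinic_example_build_ntuple_flow(uint16_t port_id,
                                struct rte_flow_error *error)
{
        struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = HINIC_MIN_N_TUPLE_PRIO,
        };
        struct rte_flow_item_ipv4 ip_spec, ip_mask;
        struct rte_flow_item_tcp tcp_spec, tcp_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&ip_spec, 0, sizeof(ip_spec));
        memset(&ip_mask, 0, sizeof(ip_mask));
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50));
        ip_spec.hdr.next_proto_id = IP_HEADER_PROTOCOL_TYPE_TCP;
        ip_mask.hdr.src_addr = UINT32_MAX;
        ip_mask.hdr.dst_addr = UINT32_MAX;
        ip_mask.hdr.next_proto_id = UINT8_MAX;

        memset(&tcp_spec, 0, sizeof(tcp_spec));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        tcp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        tcp_mask.hdr.src_port = UINT16_MAX;
        tcp_mask.hdr.dst_port = UINT16_MAX;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}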

static int
hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support TCP flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic only supports priorities 1-7 */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC, IPv4, TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter, support mac, ipv4, tcp, udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support mac, ipv4");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src, dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP or UDP or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, end");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                /* Get TCP/UDP info */
                if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.tcp_flags ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support tcp");
                                return -rte_errno;
                        }

                        rule->mask.src_port_mask = tcp_mask->hdr.src_port;
                        rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

                        if (item->spec) {
                                tcp_spec =
                                        (const struct rte_flow_item_tcp *)
                                        item->spec;
                                rule->hinic_fdir.src_port =
                                        tcp_spec->hdr.src_port;
                                rule->hinic_fdir.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }

                } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support udp");
                                return -rte_errno;
                        }
                        rule->mask.src_port_mask = udp_mask->hdr.src_port;
                        rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                        if (item->spec) {
                                udp_spec =
                                        (const struct rte_flow_item_udp *)
                                        item->spec;
                                rule->hinic_fdir.src_port =
                                        udp_spec->hdr.src_port;
                                rule->hinic_fdir.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                } else {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                /* Get next no void item */
                *in_out_item = next_no_void_pattern(pattern, item);
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Priority is not supported.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
                                const struct rte_flow_action actions[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action *act;

        /* Check if the first not void action is QUEUE */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                        item, "Not supported action.");
                return -rte_errno;
        }

        rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
 * and fill in the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP (optional).
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 1.2.3.6        0xFFFFFFFF
 *              dst_addr 1.2.3.5        0xFFFFFFFF
 * UDP/TCP      src_port 80             0xFFFF
 *              dst_port 80             0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
                               const struct rte_flow_item pattern[],
                               const struct rte_flow_action actions[],
                               struct hinic_fdir_rule *rule,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_normal_item_ele(item, pattern, rule, error))
                return -rte_errno;

        if (hinic_check_normal_attr_ele(attr, rule, error))
                return -rte_errno;

        if (hinic_check_normal_act_ele(item, actions, rule, error))
                return -rte_errno;

        return 0;
}
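
/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * a flow director rule following the template above -- IPV4 -> UDP -> END
 * with a QUEUE action and default (zero) priority. Addresses, ports and
 * the queue index are hypothetical.
 */
static __rte_unused int
hinic_example_build_fdir_flow(uint16_t port_id,
                        struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec, ip_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&ip_spec, 0, sizeof(ip_spec));
        memset(&ip_mask, 0, sizeof(ip_mask));
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 6));
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 5));
        ip_mask.hdr.src_addr = UINT32_MAX;
        ip_mask.hdr.dst_addr = UINT32_MAX;

        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}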

static int
hinic_parse_fdir_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        int ret;

        ret = hinic_parse_fdir_filter_normal(attr, pattern,
                                                actions, rule, error);
        if (ret)
                return ret;

        if (rule->queue >= dev->data->nb_rx_queues)
                return -ENOTSUP;

        return ret;
}

/**
 * Check if the flow rule is supported by the NIC.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int hinic_flow_validate(struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_flow_error *error)
{
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct hinic_fdir_rule fdir_rule;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = hinic_parse_ntuple_filter(dev, attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = hinic_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret)
                return 0;

        memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
        ret = hinic_parse_fdir_filter(dev, attr, pattern,
                                actions, &fdir_rule, error);

        return ret;
}
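
/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): hinic_flow_validate() is reached through the generic
 * rte_flow_validate()/rte_flow_create() entry points, so an application
 * would typically validate first and then create.
 */
static __rte_unused struct rte_flow *
hinic_example_validate_then_create(uint16_t port_id,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_flow_error *error)
{
        /* Format check only; creation can still fail if the HW is full */
        if (rte_flow_validate(port_id, attr, pattern, actions, error) != 0)
                return NULL;

        return rte_flow_create(port_id, attr, pattern, actions, error);
}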

static inline int
ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
                 struct hinic_5tuple_filter_info *filter_info)
{
        switch (filter->dst_ip_mask) {
        case UINT32_MAX:
                filter_info->dst_ip_mask = 0;
                filter_info->dst_ip = filter->dst_ip;
                break;
        case 0:
                filter_info->dst_ip_mask = 1;
                filter_info->dst_ip = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
                return -EINVAL;
        }

        switch (filter->src_ip_mask) {
        case UINT32_MAX:
                filter_info->src_ip_mask = 0;
                filter_info->src_ip = filter->src_ip;
                break;
        case 0:
                filter_info->src_ip_mask = 1;
                filter_info->src_ip = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
                return -EINVAL;
        }
        return 0;
}

static inline int
ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
                   struct hinic_5tuple_filter_info *filter_info)
{
        switch (filter->dst_port_mask) {
        case UINT16_MAX:
                filter_info->dst_port_mask = 0;
                filter_info->dst_port = filter->dst_port;
                break;
        case 0:
                filter_info->dst_port_mask = 1;
                filter_info->dst_port = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
                return -EINVAL;
        }

        switch (filter->src_port_mask) {
        case UINT16_MAX:
                filter_info->src_port_mask = 0;
                filter_info->src_port = filter->src_port;
                break;
        case 0:
                filter_info->src_port_mask = 1;
                filter_info->src_port = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid src_port mask.");
                return -EINVAL;
        }

        return 0;
}

static inline int
ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
                    struct hinic_5tuple_filter_info *filter_info)
{
        switch (filter->proto_mask) {
        case UINT8_MAX:
                filter_info->proto_mask = 0;
                filter_info->proto = filter->proto;
                break;
        case 0:
                filter_info->proto_mask = 1;
                filter_info->proto = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid protocol mask.");
                return -EINVAL;
        }

        return 0;
}

static inline int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
                        struct hinic_5tuple_filter_info *filter_info)
{
        if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
                filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
                filter->priority < HINIC_MIN_N_TUPLE_PRIO)
                return -EINVAL;

        if (ntuple_ip_filter(filter, filter_info) ||
                ntuple_port_filter(filter, filter_info) ||
                ntuple_proto_filter(filter, filter_info))
                return -EINVAL;

        filter_info->priority = (uint8_t)filter->priority;
        return 0;
}
1339
1340 static inline struct hinic_5tuple_filter *
1341 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1342                            struct hinic_5tuple_filter_info *key)
1343 {
1344         struct hinic_5tuple_filter *it;
1345
1346         TAILQ_FOREACH(it, filter_list, entries) {
1347                 if (memcmp(key, &it->filter_info,
1348                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1349                         return it;
1350                 }
1351         }
1352
1353         return NULL;
1354 }
1355
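/*
 * The four helpers below program protocol-specific TCAM entries (LACP,
 * BGP by destination or source port, and VRRP). Per the inline comments,
 * err_type 0x3f, fwd_action 0x7 and push_len 0xf are "not convert"
 * pass-through values; pkt_type selects how the matched packet is
 * classified and pri its priority.
 */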
1356 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1357 {
1358         struct tag_pa_rule lacp_rule;
1359         struct tag_pa_action lacp_action;
1360
1361         memset(&lacp_rule, 0, sizeof(lacp_rule));
1362         memset(&lacp_action, 0, sizeof(lacp_action));
1363         /* LACP TCAM rule */
1364         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1365         lacp_rule.l2_header.eth_type.val16 = 0x8809; /* Slow Protocols (LACP) */
1366         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1367
1368         /* LACP TCAM action */
1369         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1370         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1371         lacp_action.pkt_type = PKT_LACP_TYPE;
1372         lacp_action.pri = 0x0;
1373         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1374
1375         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1376                                         &lacp_rule, &lacp_action);
1377 }
1378
1379 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1380 {
1381         struct tag_pa_rule bgp_rule;
1382         struct tag_pa_action bgp_action;
1383
1384         memset(&bgp_rule, 0, sizeof(bgp_rule));
1385         memset(&bgp_action, 0, sizeof(bgp_action));
1386         /* BGP TCAM rule */
1387         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1388         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1389         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1390         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1391         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1392         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1393
1394         /* BGP TCAM action */
1395         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1396         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1397         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1398         bgp_action.pri = 0xf; /* BGP pri is 0xf, taken from the ipsu
1399                                * parse result, no need to convert
1400                                */
1401         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1402
1403         return hinic_set_fdir_tcam(nic_dev->hwdev,
1404                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1405 }
1406
1407 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1408 {
1409         struct tag_pa_rule bgp_rule;
1410         struct tag_pa_action bgp_action;
1411
1412         memset(&bgp_rule, 0, sizeof(bgp_rule));
1413         memset(&bgp_action, 0, sizeof(bgp_action));
1414         /* BGP TCAM rule */
1415         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1416         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1417         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1418         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1419         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID; /* Sport is 179 */
1420         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1421
1422         /* BGP TCAM action */
1423         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1424         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1425         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp_sport: 84 */
1426         bgp_action.pri = 0xf; /* BGP pri is 0xf, taken from the ipsu
1427                                * parse result, no need to convert
1428                                */
1429         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1430
1431         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1432                                         &bgp_rule, &bgp_action);
1433 }
1434
1435 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1436 {
1437         struct tag_pa_rule vrrp_rule;
1438         struct tag_pa_action vrrp_action;
1439
1440         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1441         memset(&vrrp_action, 0, sizeof(vrrp_action));
1442         /* VRRP TCAM rule */
1443         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1444         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1445         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1446         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1447
1448         /* VRRP TCAM action */
1449         vrrp_action.err_type = 0x3f;
1450         vrrp_action.fwd_action = 0x7;
1451         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1452         vrrp_action.pri = 0xf;
1453         vrrp_action.push_len = 0xf;
1454
1455         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1456                                         &vrrp_rule, &vrrp_action);
1457 }
1458
1459 /**
1460  * Clear all fdir configuration.
1461  *
1462  * @param nic_dev
1463  *   The hardware interface of an Ethernet device.
1468  */
1469 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1470 {
1471         struct hinic_filter_info *filter_info =
1472                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
1473
1474         if (filter_info->type_mask &
1475             (1 << HINIC_PKT_TYPE_FIND_ID(PKT_BGPD_DPORT_TYPE)))
1476                 hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1477
1478         if (filter_info->type_mask &
1479             (1 << HINIC_PKT_TYPE_FIND_ID(PKT_BGPD_SPORT_TYPE)))
1480                 hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1481
1482         if (filter_info->type_mask &
1483             (1 << HINIC_PKT_TYPE_FIND_ID(PKT_VRRP_TYPE)))
1484                 hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1485
1486         if (filter_info->type_mask &
1487             (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE)))
1488                 hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1489
1490         hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1491 }
1492
1493 static int
1494 hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1495                        struct hinic_filter_info *filter_info)
1496 {
1497         switch (filter->filter_info.proto) {
1498         case IPPROTO_TCP:
1499                 /* Filter type is bgp type if dst_port or src_port is 179 */
1500                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1501                         !(filter->filter_info.dst_port_mask)) {
1502                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1503                 } else if (filter->filter_info.src_port ==
1504                         RTE_BE16(BGP_DPORT_ID) &&
1505                         !(filter->filter_info.src_port_mask)) {
1506                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1507                 } else {
1508                         PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
1509                         " just support BGP now, proto:0x%x, "
1510                         "dst_port:0x%x, dst_port_mask:0x%x."
1511                         "src_port:0x%x, src_port_mask:0x%x.",
1512                         filter->filter_info.proto,
1513                         filter->filter_info.dst_port,
1514                         filter->filter_info.dst_port_mask,
1515                         filter->filter_info.src_port,
1516                         filter->filter_info.src_port_mask);
1517                         return -EINVAL;
1518                 }
1519                 break;
1520
1521         case IPPROTO_VRRP:
1522                 filter_info->pkt_type = PKT_VRRP_TYPE;
1523                 break;
1524
1525         case IPPROTO_ICMP:
1526                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1527                 break;
1528
1529         case IPPROTO_ICMPV6:
1530                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1531                 break;
1532
1533         default:
1534                 PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
1535                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1536                 "src_port: 0x%x, src_port_mask: 0x%x.",
1537                 filter->filter_info.proto, filter->filter_info.dst_port,
1538                 filter->filter_info.dst_port_mask,
1539                 filter->filter_info.src_port,
1540                 filter->filter_info.src_port_mask);
1541                 return -EINVAL;
1542         }
1543
1544         return 0;
1545 }
1546
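/*
 * HINIC_PKT_TYPE_FIND_ID() maps a hardware packet type (PKT_*_TYPE) onto
 * a bit index in filter_info->type_mask. The index must stay below
 * HINIC_MAX_Q_FILTERS, so at most 64 distinct filter types can be
 * tracked at once.
 */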
1547 static int
1548 hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1549                         struct hinic_filter_info *filter_info,
1550                         int *index)
1551 {
1552         int type_id;
1553
1554         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1555
1556         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1557                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
1558                 return -EINVAL;
1559         }
1560
1561         if (!(filter_info->type_mask & (1 << type_id))) {
1562                 filter_info->type_mask |= 1 << type_id;
1563                 filter->index = type_id;
1564                 filter_info->pkt_filters[type_id].enable = true;
1565                 filter_info->pkt_filters[type_id].pkt_proto =
1566                                                 filter->filter_info.proto;
1567                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1568                                   filter, entries);
1569         } else {
1570                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1571                 return -EIO;
1572         }
1573
1574         *index = type_id;
1575         return 0;
1576 }
1577
1578 /*
1579  * Add a 5tuple filter
1580  *
1581  * @param dev
1582  *  Pointer to struct rte_eth_dev.
1583  * @param filter
1584  *  Pointer to the filter that will be added.
1585  * @return
1586  *    - On success, zero.
1587  *    - On failure, a negative value.
1588  */
1589 static int
1590 hinic_add_5tuple_filter(struct rte_eth_dev *dev,
1591                         struct hinic_5tuple_filter *filter)
1592 {
1593         struct hinic_filter_info *filter_info =
1594                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1595         int i, ret_fw;
1596         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1597
1598         if (hinic_filter_info_init(filter, filter_info) ||
1599                 hinic_lookup_new_filter(filter, filter_info, &i))
1600                 return -EFAULT;
1601
1602         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1603                                         filter_info->qid,
1604                                         filter_info->pkt_filters[i].enable,
1605                                         true);
1606         if (ret_fw) {
1607                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1608                         filter_info->pkt_type, filter->queue,
1609                         filter_info->pkt_filters[i].enable);
1610                 return -EFAULT;
1611         }
1612
1613         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1614                         filter_info->pkt_type, filter_info->qid,
1615                         filter_info->pkt_filters[filter->index].enable);
1616
1617         switch (filter->filter_info.proto) {
1618         case IPPROTO_TCP:
1619                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
1620                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
1621                         if (ret_fw) {
1622                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
1623                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1624                                         filter_info->pkt_type, filter->queue,
1625                                         filter_info->pkt_filters[i].enable);
1626                                 return -EFAULT;
1627                         }
1628
1629                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
1630                                 filter->queue,
1631                                 filter_info->pkt_filters[i].enable);
1632                 } else if (filter->filter_info.src_port ==
1633                         RTE_BE16(BGP_DPORT_ID)) {
1634                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
1635                         if (ret_fw) {
1636                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
1637                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1638                                         filter_info->pkt_type, filter->queue,
1639                                         filter_info->pkt_filters[i].enable);
1640                                 return -EFAULT;
1641                         }
1642
1643                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
1644                                         filter->queue,
1645                                         filter_info->pkt_filters[i].enable);
1646                 }
1647
1648                 break;
1649
1650         case IPPROTO_VRRP:
1651                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
1652                 if (ret_fw) {
1653                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
1654                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1655                                 filter_info->pkt_type, filter->queue,
1656                                 filter_info->pkt_filters[i].enable);
1657                         return -EFAULT;
1658                 }
1659                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
1660                                 filter->queue,
1661                                 filter_info->pkt_filters[i].enable);
1662                 break;
1663
1664         default:
1665                 break;
1666         }
1667
1668         return 0;
1669 }
1670
1671 /*
1672  * Remove a 5tuple filter
1673  *
1674  * @param dev
1675  *  Pointer to struct rte_eth_dev.
1676  * @param filter
1677  *  Pointer to the filter that will be removed.
1678  */
1679 static void
1680 hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
1681                            struct hinic_5tuple_filter *filter)
1682 {
1683         struct hinic_filter_info *filter_info =
1684                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1685         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1686
1687         switch (filter->filter_info.proto) {
1688         case IPPROTO_VRRP:
1689                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1690                 break;
1691
1692         case IPPROTO_TCP:
1693                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
1694                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
1695                                                         TCAM_PKT_BGP_DPORT);
1696                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
1697                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
1698                                                         TCAM_PKT_BGP_SPORT);
1699                 break;
1700
1701         default:
1702                 break;
1703         }
1704
1705         hinic_filter_info_init(filter, filter_info);
1706
1707         filter_info->pkt_filters[filter->index].enable = false;
1708         filter_info->pkt_filters[filter->index].pkt_proto = 0;
1709
1710         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1711                 filter_info->pkt_type,
1712                 filter_info->pkt_filters[filter->index].qid,
1713                 filter_info->pkt_filters[filter->index].enable);
1714         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1715                                 filter_info->pkt_filters[filter->index].qid,
1716                                 filter_info->pkt_filters[filter->index].enable,
1717                                 true);
1718
1719         filter_info->pkt_type = 0;
1720         filter_info->qid = 0;
1721         filter_info->pkt_filters[filter->index].qid = 0;
1722         filter_info->type_mask &= ~(1 << (filter->index));
1723         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
1724
1725         rte_free(filter);
1726 }
1727
1728 /*
1729  * Add or delete a ntuple filter
1730  *
1731  * @param dev
1732  *  Pointer to struct rte_eth_dev.
1733  * @param ntuple_filter
1734  *  Pointer to struct rte_eth_ntuple_filter
1735  * @param add
1736  *  If true, add filter; if false, remove filter
1737  * @return
1738  *    - On success, zero.
1739  *    - On failure, a negative value.
1740  */
1741 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
1742                                 struct rte_eth_ntuple_filter *ntuple_filter,
1743                                 bool add)
1744 {
1745         struct hinic_filter_info *filter_info =
1746                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1747         struct hinic_5tuple_filter_info filter_5tuple;
1748         struct hinic_5tuple_filter *filter;
1749         int ret;
1750
1751         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
1752                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
1753                 return -EINVAL;
1754         }
1755
1756         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
1757         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
1758         if (ret < 0)
1759                 return ret;
1760
1761         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
1762                                          &filter_5tuple);
1763         if (filter != NULL && add) {
1764                 PMD_DRV_LOG(ERR, "Filter exists.");
1765                 return -EEXIST;
1766         }
1767         if (filter == NULL && !add) {
1768                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
1769                 return -ENOENT;
1770         }
1771
1772         if (add) {
1773                 filter = rte_zmalloc("hinic_5tuple_filter",
1774                                 sizeof(struct hinic_5tuple_filter), 0);
1775                 if (filter == NULL)
1776                         return -ENOMEM;
1777                 rte_memcpy(&filter->filter_info, &filter_5tuple,
1778                                 sizeof(struct hinic_5tuple_filter_info));
1779                 filter->queue = ntuple_filter->queue;
1780
1781                 filter_info->qid = ntuple_filter->queue;
1782
1783                 ret = hinic_add_5tuple_filter(dev, filter);
1784                 if (ret)
1785                         rte_free(filter);
1786
1787                 return ret;
1788         }
1789
1790         hinic_remove_5tuple_filter(dev, filter);
1791
1792         return 0;
1793 }
1794
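/*
 * Illustrative sketch (not compiled into the PMD): the kind of rte_flow
 * rule an application could submit that ends up in
 * hinic_add_del_ntuple_filter() above. It matches IPv4/TCP with
 * destination port 179 (BGP) and steers hits to one Rx queue. The
 * function name, port id and queue index are made up for the example.
 */
#if 0
static int
example_create_bgp_dport_rule(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.next_proto_id = IPPROTO_TCP,
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.next_proto_id = UINT8_MAX,
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(BGP_DPORT_ID),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Non-NULL handle means the rule was accepted and installed */
	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
			0 : -EINVAL;
}
#endif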
1795 static inline int
1796 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
1797 {
1798         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
1799                 return -EINVAL;
1800
1801         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
1802                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
1803                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
1804                         " ethertype filter", filter->ether_type);
1805                 return -EINVAL;
1806         }
1807
1808         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
1809                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
1810                 return -EINVAL;
1811         }
1812         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1813                 PMD_DRV_LOG(ERR, "Drop option is not supported");
1814                 return -EINVAL;
1815         }
1816
1817         return 0;
1818 }
1819
1820 static inline int
1821 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
1822                               struct hinic_pkt_filter *ethertype_filter)
1823 {
1824         switch (ethertype_filter->pkt_proto) {
1825         case RTE_ETHER_TYPE_SLOW:
1826                 filter_info->pkt_type = PKT_LACP_TYPE;
1827                 break;
1828
1829         case RTE_ETHER_TYPE_ARP:
1830                 filter_info->pkt_type = PKT_ARP_TYPE;
1831                 break;
1832
1833         default:
1834                 PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
1835                 return -EIO;
1836         }
1837
1838         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1839 }
1840
1841 static inline int
1842 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
1843                               struct hinic_pkt_filter *ethertype_filter)
1844 {
1845         int id;
1846
1847         /* Find LACP or VRRP type id */
1848         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
1849         if (id < 0)
1850                 return -EINVAL;
1851
1852         if (!(filter_info->type_mask & (1 << id))) {
1853                 filter_info->type_mask |= 1 << id;
1854                 filter_info->pkt_filters[id].pkt_proto =
1855                         ethertype_filter->pkt_proto;
1856                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
1857                 filter_info->qid = ethertype_filter->qid;
1858                 return id;
1859         }
1860
1861         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
1862         return -EINVAL;
1863 }
1864
1865 static inline void
1866 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
1867                               uint8_t idx)
1868 {
1869         if (idx >= HINIC_MAX_Q_FILTERS)
1870                 return;
1871
1872         filter_info->pkt_type = 0;
1873         filter_info->type_mask &= ~(1 << idx);
1874         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
1875         filter_info->pkt_filters[idx].enable = FALSE;
1876         filter_info->pkt_filters[idx].qid = 0;
1877 }
1878
1879 static inline int
1880 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
1881                                struct rte_eth_ethertype_filter *filter,
1882                                bool add)
1883 {
1884         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1885         struct hinic_filter_info *filter_info =
1886                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1887         struct hinic_pkt_filter ethertype_filter;
1888         int i;
1889         int ret_fw;
1890
1891         if (hinic_check_ethertype_filter(filter))
1892                 return -EINVAL;
1893
1894         if (add) {
1895                 ethertype_filter.pkt_proto = filter->ether_type;
1896                 ethertype_filter.enable = TRUE;
1897                 ethertype_filter.qid = (u8)filter->queue;
1898                 i = hinic_ethertype_filter_insert(filter_info,
1899                                                     &ethertype_filter);
1900                 if (i < 0)
1901                         return -ENOSPC;
1902
1903                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
1904                                 filter_info->pkt_type, filter_info->qid,
1905                                 filter_info->pkt_filters[i].enable, true);
1906                 if (ret_fw) {
1907                         PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1908                                 filter_info->pkt_type, filter->queue,
1909                                 filter_info->pkt_filters[i].enable);
1910
1911                         hinic_ethertype_filter_remove(filter_info, i);
1912                         return -ENOENT;
1913                 }
1914                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1915                                 filter_info->pkt_type, filter->queue,
1916                                 filter_info->pkt_filters[i].enable);
1917
1918                 switch (ethertype_filter.pkt_proto) {
1919                 case RTE_ETHER_TYPE_SLOW:
1920                         ret_fw = hinic_set_lacp_tcam(nic_dev);
1921                         if (ret_fw) {
1922                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
1923                                 hinic_ethertype_filter_remove(filter_info, i);
1924                                 return -ENOENT;
1925                         }
1926
1927                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
1928                         break;
1929                 default:
1930                         break;
1931                 }
1932
1933         } else {
1934                 ethertype_filter.pkt_proto = filter->ether_type;
1935                 i = hinic_ethertype_filter_lookup(filter_info,
1936                                                 &ethertype_filter);
1937
                if (i < 0) {
                        PMD_DRV_LOG(ERR, "Ethertype filter doesn't exist");
                        return -ENOENT;
                }

1938                 if ((filter_info->type_mask & (1 << i))) {
1939                         filter_info->pkt_filters[i].enable = FALSE;
1940                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
1941                                         filter_info->pkt_type,
1942                                         filter_info->pkt_filters[i].qid,
1943                                         filter_info->pkt_filters[i].enable,
1944                                         true);
1945
1946                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1947                                         filter_info->pkt_type,
1948                                         filter_info->pkt_filters[i].qid,
1949                                         filter_info->pkt_filters[i].enable);
1950
1951                         switch (ethertype_filter.pkt_proto) {
1952                         case RTE_ETHER_TYPE_SLOW:
1953                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
1954                                                                 TCAM_PKT_LACP);
1955                                 PMD_DRV_LOG(INFO,
1956                                         "Del lacp tcam succeed");
1957                                 break;
1958                         default:
1959                                 break;
1960                         }
1961
1962                         hinic_ethertype_filter_remove(filter_info, i);
1963
1964                 } else {
1965                         PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
1966                                         filter_info->pkt_type, filter->queue,
1967                                         filter_info->pkt_filters[i].enable);
1968                         return -ENOENT;
1969                 }
1970         }
1971
1972         return 0;
1973 }
1974
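/*
 * Illustrative sketch (not compiled into the PMD): an rte_flow rule that
 * the ethertype path above serves, steering ARP frames to one Rx queue.
 * The function name, port id and queue index are made up for the example.
 */
#if 0
static int
example_create_arp_rule(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_ARP),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
			0 : -EINVAL;
}
#endif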
1975 static int
1976 hinic_fdir_info_init(struct hinic_fdir_rule *rule,
1977                      struct hinic_fdir_info *fdir_info)
1978 {
1979         switch (rule->mask.src_ipv4_mask) {
1980         case UINT32_MAX:
1981                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
1982                 fdir_info->qid = rule->queue;
1983                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
1984                 return 0;
1985
1986         case 0:
1987                 break;
1988
1989         default:
1990                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1991                 return -EINVAL;
1992         }
1993
1994         switch (rule->mask.dst_ipv4_mask) {
1995         case UINT32_MAX:
1996                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
1997                 fdir_info->qid = rule->queue;
1998                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
1999                 return 0;
2000
2001         case 0:
2002                 break;
2003
2004         default:
2005                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2006                 return -EINVAL;
2007         }
2008
2009         if (fdir_info->fdir_flag == 0) {
2010                 PMD_DRV_LOG(ERR, "All support mask is NULL.");
2011                 return -EINVAL;
2012         }
2013
2014         return 0;
2015 }
2016
2017 static inline int
2018 hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2019                           struct hinic_fdir_rule *rule,
2020                           bool add)
2021 {
2022         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2023         struct hinic_fdir_info fdir_info;
2024         int ret;
2025
2026         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2027
2028         ret = hinic_fdir_info_init(rule, &fdir_info);
2029         if (ret) {
2030                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2031                 return ret;
2032         }
2033
2034         if (add) {
2035                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2036                                                 true, fdir_info.fdir_key,
2037                                                 true, fdir_info.fdir_flag);
2038                 if (ret) {
2039                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2040                                         fdir_info.fdir_flag, fdir_info.qid,
2041                                         fdir_info.fdir_key);
2042                         return -ENOENT;
2043                 }
2044                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2045                                 fdir_info.fdir_flag, fdir_info.qid,
2046                                 fdir_info.fdir_key);
2047         } else {
2048                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2049                                                 false, fdir_info.fdir_key, true,
2050                                                 fdir_info.fdir_flag);
2051                 if (ret) {
2052                         PMD_DRV_LOG(ERR, "Del fdir filter ailed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2053                                 fdir_info.fdir_flag, fdir_info.qid,
2054                                 fdir_info.fdir_key);
2055                         return -ENOENT;
2056                 }
2057                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2058                                 fdir_info.fdir_flag, fdir_info.qid,
2059                                 fdir_info.fdir_key);
2060         }
2061
2062         return 0;
2063 }
2064
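/*
 * Illustrative sketch (not compiled into the PMD): a flow director style
 * rule that the path above serves, exact-matching one IPv4 destination
 * address. The address, function name, port id and queue index are made
 * up for the example.
 */
#if 0
static int
example_create_dst_ip_rule(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xC0A80101), /* 192.168.1.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX), /* exact match */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
			0 : -EINVAL;
}
#endif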
2065 /**
2066  * Create a flow rule.
2067  * Theoretically one rule can match more than one filter type.
2068  * We let it use the filter type it hits first,
2069  * so the matching sequence below matters.
2070  */
2071 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2072                                         const struct rte_flow_attr *attr,
2073                                         const struct rte_flow_item pattern[],
2074                                         const struct rte_flow_action actions[],
2075                                         struct rte_flow_error *error)
2076 {
2077         int ret;
2078         struct rte_eth_ntuple_filter ntuple_filter;
2079         struct rte_eth_ethertype_filter ethertype_filter;
2080         struct hinic_fdir_rule fdir_rule;
2081         struct rte_flow *flow = NULL;
2082         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2083         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2084         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2085         struct hinic_flow_mem *hinic_flow_mem_ptr;
2086         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2087
2088         flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2089         if (!flow) {
2090                 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2091                 return NULL;
2092         }
2093
2094         hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2095                         sizeof(struct hinic_flow_mem), 0);
2096         if (!hinic_flow_mem_ptr) {
2097                 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2098                 rte_free(flow);
2099                 return NULL;
2100         }
2101
2102         hinic_flow_mem_ptr->flow = flow;
2103         TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2104                                 entries);
2105
2106         /* Add ntuple filter */
2107         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2108         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2109                         actions, &ntuple_filter, error);
2110         if (!ret) {
2111                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2112                 if (!ret) {
2113                         ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2114                                 sizeof(struct hinic_ntuple_filter_ele), 0);
                        if (ntuple_filter_ptr == NULL) {
                                (void)hinic_add_del_ntuple_filter(dev,
                                                &ntuple_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
2115                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2116                                    &ntuple_filter,
2117                                    sizeof(struct rte_eth_ntuple_filter));
2118                         TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2119                                 ntuple_filter_ptr, entries);
2120                         flow->rule = ntuple_filter_ptr;
2121                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2122
2123                         PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
2124                                 hinic_global_func_id(nic_dev->hwdev));
2125                         return flow;
2126                 }
2127                 goto out;
2128         }
2129
2130         /* Add ethertype filter */
2131         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2132         ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
2133                                         &ethertype_filter, error);
2134         if (!ret) {
2135                 ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
2136                                                      TRUE);
2137                 if (!ret) {
2138                         ethertype_filter_ptr =
2139                                 rte_zmalloc("hinic_ethertype_filter",
2140                                 sizeof(struct hinic_ethertype_filter_ele), 0);
                        if (ethertype_filter_ptr == NULL) {
                                (void)hinic_add_del_ethertype_filter(dev,
                                                &ethertype_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
2141                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2142                                 &ethertype_filter,
2143                                 sizeof(struct rte_eth_ethertype_filter));
2144                         TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
2145                                 ethertype_filter_ptr, entries);
2146                         flow->rule = ethertype_filter_ptr;
2147                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2148
2149                         PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
2150                                         hinic_global_func_id(nic_dev->hwdev));
2151                         return flow;
2152                 }
2153                 goto out;
2154         }
2155
2156         /* Add fdir filter */
2157         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
2158         ret = hinic_parse_fdir_filter(dev, attr, pattern,
2159                                       actions, &fdir_rule, error);
2160         if (!ret) {
2161                 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, TRUE);
2162                 if (!ret) {
2163                         fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
2164                                 sizeof(struct hinic_fdir_rule_ele), 0);
                        if (fdir_rule_ptr == NULL) {
                                (void)hinic_add_del_fdir_filter(dev,
                                                &fdir_rule, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
2165                         rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
2166                                 sizeof(struct hinic_fdir_rule));
2167                         TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
2168                                 fdir_rule_ptr, entries);
2169                         flow->rule = fdir_rule_ptr;
2170                         flow->filter_type = RTE_ETH_FILTER_FDIR;
2171
2172                         PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x",
2173                                         hinic_global_func_id(nic_dev->hwdev));
2174                         return flow;
2175                 }
2176                 goto out;
2177         }
2178
2179 out:
2180         TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
2181         rte_flow_error_set(error, -ret,
2182                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2183                            "Failed to create flow.");
2184         rte_free(hinic_flow_mem_ptr);
2185         rte_free(flow);
2186         return NULL;
2187 }
2188
2189 /* Destroy a flow rule on hinic. */
2190 static int hinic_flow_destroy(struct rte_eth_dev *dev,
2191                               struct rte_flow *flow,
2192                               struct rte_flow_error *error)
2193 {
2194         int ret;
2195         struct rte_flow *pmd_flow = flow;
2196         enum rte_filter_type filter_type = pmd_flow->filter_type;
2197         struct rte_eth_ntuple_filter ntuple_filter;
2198         struct rte_eth_ethertype_filter ethertype_filter;
2199         struct hinic_fdir_rule fdir_rule;
2200         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2201         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2202         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2203         struct hinic_flow_mem *hinic_flow_mem_ptr;
2204         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2205
2206         switch (filter_type) {
2207         case RTE_ETH_FILTER_NTUPLE:
2208                 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
2209                                         pmd_flow->rule;
2210                 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
2211                         sizeof(struct rte_eth_ntuple_filter));
2212                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2213                 if (!ret) {
2214                         TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
2215                                 ntuple_filter_ptr, entries);
2216                         rte_free(ntuple_filter_ptr);
2217                 }
2218                 break;
2219         case RTE_ETH_FILTER_ETHERTYPE:
2220                 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
2221                                         pmd_flow->rule;
2222                 rte_memcpy(&ethertype_filter,
2223                         &ethertype_filter_ptr->filter_info,
2224                         sizeof(struct rte_eth_ethertype_filter));
2225                 ret = hinic_add_del_ethertype_filter(dev,
2226                                 &ethertype_filter, FALSE);
2227                 if (!ret) {
2228                         TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
2229                                 ethertype_filter_ptr, entries);
2230                         rte_free(ethertype_filter_ptr);
2231                 }
2232                 break;
2233         case RTE_ETH_FILTER_FDIR:
2234                 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
2235                 rte_memcpy(&fdir_rule,
2236                         &fdir_rule_ptr->filter_info,
2237                         sizeof(struct hinic_fdir_rule));
2238                 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
2239                 if (!ret) {
2240                         TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
2241                                 fdir_rule_ptr, entries);
2242                         rte_free(fdir_rule_ptr);
2243                 }
2244                 break;
2245         default:
2246                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2247                         filter_type);
2248                 ret = -EINVAL;
2249                 break;
2250         }
2251
2252         if (ret) {
2253                 rte_flow_error_set(error, EINVAL,
2254                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2255                                 NULL, "Failed to destroy flow");
2256                 return ret;
2257         }
2258
2259         TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
2260                 if (hinic_flow_mem_ptr->flow == pmd_flow) {
2261                         TAILQ_REMOVE(&nic_dev->hinic_flow_list,
2262                                 hinic_flow_mem_ptr, entries);
2263                         rte_free(hinic_flow_mem_ptr);
2264                         break;
2265                 }
2266         }
2267         rte_free(flow);
2268
2269         PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
2270                         hinic_global_func_id(nic_dev->hwdev));
2271
2272         return ret;
2273 }
2274
2275 /* Remove all the n-tuple filters */
2276 static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
2277 {
2278         struct hinic_filter_info *filter_info =
2279                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2280         struct hinic_5tuple_filter *p_5tuple;
2281
2282         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
2283                 hinic_remove_5tuple_filter(dev, p_5tuple);
2284 }
2285
2286 /* Remove all the ether type filters */
2287 static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
2288 {
2289         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2290         struct hinic_filter_info *filter_info =
2291                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
2292         int ret = 0;
2293
2294         if (filter_info->type_mask &
2295                 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
2296                 hinic_ethertype_filter_remove(filter_info,
2297                         HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
2298                 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
2299                                         filter_info->qid, false, true);
2300
2301                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
2302         }
2303
2304         if (filter_info->type_mask &
2305                 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
2306                 hinic_ethertype_filter_remove(filter_info,
2307                         HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
2308                 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
2309                         filter_info->qid, false, true);
2310         }
2311
2312         if (ret)
2313                 PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
2314                                 filter_info->pkt_type);
2315 }
2316
2317 /* Remove all the flow director filters */
2318 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
2319 {
2320         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2321
2322         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
2323 }
2324
2325 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
2326 {
2327         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2328         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2329         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2330         struct hinic_flow_mem *hinic_flow_mem_ptr;
2331         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2332
2333         while ((ntuple_filter_ptr =
2334                         TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
2335                 TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
2336                                  entries);
2337                 rte_free(ntuple_filter_ptr);
2338         }
2339
2340         while ((ethertype_filter_ptr =
2341                         TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
2342                 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
2343                                 ethertype_filter_ptr,
2344                                 entries);
2345                 rte_free(ethertype_filter_ptr);
2346         }
2347
2348         while ((fdir_rule_ptr =
2349                         TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
2350                 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
2351                                  entries);
2352                 rte_free(fdir_rule_ptr);
2353         }
2354
2355         while ((hinic_flow_mem_ptr =
2356                         TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
2357                 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2358                                  entries);
2359                 rte_free(hinic_flow_mem_ptr->flow);
2360                 rte_free(hinic_flow_mem_ptr);
2361         }
2362 }
2363
2364 /* Destroy all flow rules associated with a port on hinic. */
2365 static int hinic_flow_flush(struct rte_eth_dev *dev,
2366                                 __rte_unused struct rte_flow_error *error)
2367 {
2368         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2369
2370         hinic_clear_all_ntuple_filter(dev);
2371         hinic_clear_all_ethertype_filter(dev);
2372         hinic_clear_all_fdir_filter(dev);
2373         hinic_filterlist_flush(dev);
2374
2375         PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
2376                         hinic_global_func_id(nic_dev->hwdev));
2377         return 0;
2378 }
2379
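/*
 * Illustrative sketch (not compiled into the PMD): an application drops
 * every rule on a port with rte_flow_flush(), which lands in
 * hinic_flow_flush() below. port_id and the helper name are made up for
 * the example.
 */
#if 0
static void
example_flush_port(uint16_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_flush(port_id, &err) != 0)
		PMD_DRV_LOG(ERR, "Flow flush failed: %s",
			    err.message ? err.message : "unknown");
}
#endif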
2380 const struct rte_flow_ops hinic_flow_ops = {
2381         .validate = hinic_flow_validate,
2382         .create = hinic_flow_create,
2383         .destroy = hinic_flow_destroy,
2384         .flush = hinic_flow_flush,
2385 };
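
/*
 * A minimal sketch (an assumption, not a quote of hinic_pmd_ethdev.c) of
 * how PMDs of this DPDK generation usually expose the ops table above:
 * the ethdev .filter_ctrl callback hands out &hinic_flow_ops when queried
 * with RTE_ETH_FILTER_GENERIC.
 */
#if 0
static int
example_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op, void *arg)
{
	if (filter_type != RTE_ETH_FILTER_GENERIC)
		return -ENOTSUP;
	if (filter_op != RTE_ETH_FILTER_GET)
		return -EINVAL;
	*(const struct rte_flow_ops **)arg = &hinic_flow_ops;
	return 0;
}
#endif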