/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6
#define IP_HEADER_PROTOCOL_TYPE_UDP     17
#define IP_HEADER_PROTOCOL_TYPE_ICMP    1
#define IP_HEADER_PROTOCOL_TYPE_ICMPV6  58

#define FDIR_TCAM_NORMAL_PACKET         0
#define FDIR_TCAM_TUNNEL_PACKET         1

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define TCAM_DIP_IPV4_TYPE      0
#define TCAM_DIP_IPV6_TYPE      1

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->tcam)

enum hinic_atr_flow_type {
        HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
        HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
        HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
        HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
};

/* Structure to store fdir's info. */
struct hinic_fdir_info {
        uint8_t fdir_flag;
        uint8_t qid;
        uint32_t fdir_key;
};

/**
 * An endless loop can never happen here under two assumptions:
 * 1. there is at least one not-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}
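
/*
 * Usage sketch (illustrative only, not part of the driver): walking a
 * pattern such as { ETH, VOID, IPV4, END } with next_no_void_pattern()
 * visits ETH, then IPV4, then END, silently skipping the VOID entry:
 *
 *      const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *      while (it->type != RTE_FLOW_ITEM_TYPE_END) {
 *              ... handle *it here ...
 *              it = next_no_void_pattern(pattern, it);
 *      }
 */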

static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
                                const struct rte_flow_item *pattern,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
                                        struct rte_flow_error *error)
{
        /* The first non-void item should be MAC */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }
        return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
                        const struct rte_flow_action *act,
                        const struct rte_flow_action_queue *act_q,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        /* Parse action */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
                act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule,
 * and fill in the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item *pattern,
                        const struct rte_flow_action *actions,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act = NULL;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        item = next_no_void_pattern(pattern, NULL);
        if (hinic_check_ethertype_first_item(item, error))
                return -rte_errno;

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /*
         * Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /*
         * If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
                return -rte_errno;

        if (hinic_check_ethertype_attr_ele(attr, error))
                return -rte_errno;

        return 0;
}
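
/*
 * Example (a hypothetical caller sketch, not part of this file): a rule
 * accepted by cons_parse_ethertype_filter() -- match ARP frames
 * (ether type 0x0806) and steer them to queue 0. The source and
 * destination MAC masks stay all-zero, so RTE_ETHTYPE_FLAGS_MAC is not set.
 *
 *      struct rte_flow_item_eth eth_spec = {
 *              .type = RTE_BE16(RTE_ETHER_TYPE_ARP),
 *      };
 *      struct rte_flow_item_eth eth_mask = {
 *              .type = RTE_BE16(0xFFFF),
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &eth_spec, .mask = &eth_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_attr attr = { .ingress = 1 };
 */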

static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
                return -rte_errno;

        /* NIC doesn't support matching on MAC address. */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index much too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Drop option is unsupported");
                return -rte_errno;
        }

        /* Hinic only supports LACP/ARP ether types */
        if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
                filter->ether_type != RTE_ETHER_TYPE_ARP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "only lacp/arp type supported by ethertype filter");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (item->type != RTE_FLOW_ITEM_TYPE_END &&
                (!item->spec || !item->mask)) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* ICMP all should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian while rte_flow_attr and
 * rte_flow_action use CPU order, because the pattern
 * describes packets, which normally use network order.
 */
static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}
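
/*
 * Example (a hypothetical caller sketch, not part of this file): a
 * 5-tuple pattern matching the doc comment above -- UDP packets from
 * 192.168.1.20 to 192.167.3.50, to be steered by a QUEUE action (not
 * shown). Note that next_proto_id must be fully masked, or the IPv4
 * check above rejects the rule.
 *
 *      struct rte_flow_item_ipv4 ip_spec = {
 *              .hdr = {
 *                      .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *                      .dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
 *                      .next_proto_id = IP_HEADER_PROTOCOL_TYPE_UDP,
 *              },
 *      };
 *      struct rte_flow_item_ipv4 ip_mask = {
 *              .hdr = {
 *                      .src_addr = RTE_BE32(UINT32_MAX),
 *                      .dst_addr = RTE_BE32(UINT32_MAX),
 *                      .next_proto_id = UINT8_MAX,
 *              },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */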

static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support tcp flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic doesn't support many priorities */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 or TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter,support mac,ipv4,tcp,udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter,support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                        item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter,support mac,ipv4");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;
        const struct rte_flow_item *item = *in_out_item;
        int i;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src,dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
                rule->mode = HINIC_FDIR_MODE_NORMAL;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP or UDP or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, end");
                        return -rte_errno;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;

                /* Only support dst addresses, others should be masked */
                if (ipv6_mask->hdr.vtc_flow ||
                    ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.proto ||
                    ipv6_mask->hdr.hop_limits) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support dst ipv6");
                        return -rte_errno;
                }

                /* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
                for (i = 0; i < 16; i++) {
                        if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Not supported by fdir filter, do not support src ipv6");
                                return -rte_errno;
                        }
                }

                if (!item->spec) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, ipv6 spec is NULL");
                        return -rte_errno;
                }

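                /*
                 * Build a 16-bit bitmap from the per-byte mask: bit i of
                 * dst_ipv6_mask is set when byte i of the IPv6 destination
                 * address must match exactly (mask byte 0xFF).
                 */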
                for (i = 0; i < 16; i++) {
                        if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
                                rule->mask.dst_ipv6_mask |= 1 << i;
                }

                ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
                rte_memcpy(rule->hinic_fdir.dst_ipv6,
                           ipv6_spec->hdr.dst_addr, 16);

                /*
                 * Check if the next not void item is TCP or UDP or ICMP.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP6) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, icmp");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                        __rte_unused const struct rte_flow_item pattern[],
                        __rte_unused struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by normal fdir filter, not support l4");
                return -rte_errno;
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
            hinic_normal_item_check_ip(&item, pattern, rule, error) ||
            hinic_normal_item_check_l4(&item, pattern, rule, error) ||
            hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int
hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir normal tcam filter");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
                rule->mask.src_port_mask = tcp_mask->hdr.src_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                if (item->spec) {
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
                        rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                /*
                 * Only care about src & dst ports,
                 * others should be masked.
                 */
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                udp_mask = (const struct rte_flow_item_udp *)item->mask;
                if (udp_mask->hdr.dgram_len ||
                        udp_mask->hdr.dgram_cksum) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support udp");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.src_port_mask = udp_mask->hdr.src_port;
                rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
                if (item->spec) {
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
                        rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
                }
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");
                return -rte_errno;
        }

        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter tcam normal, support end");
                return -rte_errno;
        }

        /* get next no void item */
        *in_out_item = item;

        return 0;
}

static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support vxlan");
                        return -rte_errno;
                }

                *in_out_item = item;
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
                return -rte_errno;
        }

        return 0;
}

static int
hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                *in_out_item = item;
        }

        return 0;
}
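
/*
 * Illustrative note (a sketch of the accepted layout, not driver code):
 * the two helpers above admit TCAM tunnel patterns of the shape
 *
 *      ETH / IPV4 / UDP / VXLAN / TCP|UDP|ANY / END
 *
 * i.e. the outer L4 must be UDP carrying VXLAN, while the inner L4 may
 * be TCP, UDP or ANY; inner ports are then taken from the inner item's
 * spec/mask by hinic_tunnel_inner_item_check_l4() below.
 */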

static int
hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Not supported last point for range */
                if (item->last) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                /* get the TCP/UDP info */
                if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.tcp_flags ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                (void)memset(rule, 0,
                                        sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support tcp");
                                return -rte_errno;
                        }

                        rule->mode = HINIC_FDIR_MODE_TCAM;
                        rule->mask.tunnel_flag = UINT16_MAX;
                        rule->mask.tunnel_inner_src_port_mask =
                                                        tcp_mask->hdr.src_port;
                        rule->mask.tunnel_inner_dst_port_mask =
                                                        tcp_mask->hdr.dst_port;
                        rule->mask.proto_mask = UINT16_MAX;

                        rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                        if (item->spec) {
                                tcp_spec =
                                (const struct rte_flow_item_tcp *)item->spec;
                                rule->hinic_fdir.tunnel_inner_src_port =
                                                        tcp_spec->hdr.src_port;
                                rule->hinic_fdir.tunnel_inner_dst_port =
                                                        tcp_spec->hdr.dst_port;
                        }
                } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support udp");
1334                                 return -rte_errno;
1335                         }
1336
1337                         rule->mode = HINIC_FDIR_MODE_TCAM;
1338                         rule->mask.tunnel_flag = UINT16_MAX;
1339                         rule->mask.tunnel_inner_src_port_mask =
1340                                                         udp_mask->hdr.src_port;
1341                         rule->mask.tunnel_inner_dst_port_mask =
1342                                                         udp_mask->hdr.dst_port;
1343                         rule->mask.proto_mask = UINT16_MAX;
1344
1345                         rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1346                         if (item->spec) {
1347                                 udp_spec =
1348                                 (const struct rte_flow_item_udp *)item->spec;
1349                                 rule->hinic_fdir.tunnel_inner_src_port =
1350                                                         udp_spec->hdr.src_port;
1351                                 rule->hinic_fdir.tunnel_inner_dst_port =
1352                                                         udp_spec->hdr.dst_port;
1353                         }
1354                 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
1355                         rule->mode = HINIC_FDIR_MODE_TCAM;
1356                         rule->mask.tunnel_flag = UINT16_MAX;
1357                 } else {
1358                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
1359                         rte_flow_error_set(error, EINVAL,
1360                                 RTE_FLOW_ERROR_TYPE_ITEM,
1361                                 item, "Not supported by fdir filter, support tcp/udp");
1362                         return -rte_errno;
1363                 }
1364
1365                 /* get next no void item */
1366                 *in_out_item = next_no_void_pattern(pattern, item);
1367         }
1368
1369         return 0;
1370 }
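/*
 * Editor's note -- a minimal, hedged sketch (not part of the driver) of a
 * pattern the tunnel checks above accept: outer ETH/IPV4/UDP, then VXLAN,
 * then an inner UDP item carrying the ports to match. All field values are
 * invented for illustration.
 */
#if 0
static void example_vxlan_inner_udp_pattern(void)
{
        struct rte_flow_item_udp inner_spec = {
                .hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
        };
        struct rte_flow_item_udp inner_mask = {
                .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },   /* outer L4: UDP only */
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,     /* inner L4 */
                  .spec = &inner_spec, .mask = &inner_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        (void)pattern;
}
#endif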
1371
1372 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1373                                         const struct rte_flow_item pattern[],
1374                                         struct hinic_fdir_rule *rule,
1375                                         struct rte_flow_error *error)
1376 {
1377         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1378                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1379                 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1380                 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1381                 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1382                 hinic_normal_item_check_end(item, rule, error))
1383                 return -rte_errno;
1384
1385         return 0;
1386 }
1387
1388 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1389                                         struct hinic_fdir_rule *rule,
1390                                         struct rte_flow_error *error)
1391 {
1392         /* Must be input direction */
1393         if (!attr->ingress) {
1394                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1395                 rte_flow_error_set(error, EINVAL,
1396                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1397                                    attr, "Only support ingress.");
1398                 return -rte_errno;
1399         }
1400
1401         /* Not supported */
1402         if (attr->egress) {
1403                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1404                 rte_flow_error_set(error, EINVAL,
1405                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1406                                    attr, "Not support egress.");
1407                 return -rte_errno;
1408         }
1409
1410         /* Not supported */
1411         if (attr->priority) {
1412                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1413                 rte_flow_error_set(error, EINVAL,
1414                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1415                         attr, "Not support priority.");
1416                 return -rte_errno;
1417         }
1418
1419         return 0;
1420 }
1421
1422 static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
1423                                 const struct rte_flow_action actions[],
1424                                 struct hinic_fdir_rule *rule,
1425                                 struct rte_flow_error *error)
1426 {
1427         const struct rte_flow_action *act;
1428
1429         /* Check if the first not void action is QUEUE */
1430         act = next_no_void_action(actions, NULL);
1431         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1432                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1433                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1434                         item, "Not supported action.");
1435                 return -rte_errno;
1436         }
1437
1438         rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1439
1440         /* Check if the next not void item is END */
1441         act = next_no_void_action(actions, act);
1442         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1443                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1444                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1445                         act, "Not supported action.");
1446                 return -rte_errno;
1447         }
1448
1449         return 0;
1450 }
1451
1452 /**
1453  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1454  * and collect the flow director filter info along the way.
1455  * UDP/TCP/SCTP PATTERN:
1456  * The first not void item can be ETH or IPV4 or IPV6
1457  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1458  * The next not void item can be UDP or TCP (optional).
1459  * The next not void item must be END.
1460  * ACTION:
1461  * The first not void action should be QUEUE.
1464  * The next not void action must be END.
1465  * UDP/TCP pattern example:
1466  * ITEM       Spec               Mask
1467  * ETH        NULL               NULL
1468  * IPV4       src_addr 1.2.3.6   0xFFFFFFFF
1469  *            dst_addr 1.2.3.5   0xFFFFFFFF
1470  * UDP/TCP    src_port 80        0xFFFF
1471  *            dst_port 80        0xFFFF
1472  * END
1473  * Other members in mask and spec should be set to 0x00.
1474  * Item->last should be NULL.
1475  */
1476 static int
1477 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1478                                const struct rte_flow_item pattern[],
1479                                const struct rte_flow_action actions[],
1480                                struct hinic_fdir_rule *rule,
1481                                struct rte_flow_error *error)
1482 {
1483         const struct rte_flow_item *item = NULL;
1484
1485         if (hinic_check_filter_arg(attr, pattern, actions, error))
1486                 return -rte_errno;
1487
1488         if (hinic_check_normal_item_ele(item, pattern, rule, error))
1489                 return -rte_errno;
1490
1491         if (hinic_check_normal_attr_ele(attr, rule, error))
1492                 return -rte_errno;
1493
1494         if (hinic_check_normal_act_ele(item, actions, rule, error))
1495                 return -rte_errno;
1496
1497         return 0;
1498 }
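/*
 * Editor's illustration (hedged, not part of the driver): the attr/actions
 * shape the normal fdir parser above expects -- ingress only, priority 0, a
 * single QUEUE action followed by END. The queue index 3 is invented.
 */
#if 0
static void example_fdir_attr_and_actions(void)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        (void)attr;
        (void)actions;
}
#endif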
1499
1500 /**
1501  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1502  * and collect the flow director filter info along the way.
1503  * UDP/TCP/SCTP PATTERN:
1504  * The first not void item can be ETH or IPV4 or IPV6
1505  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1506  * The next not void item can be ANY, TCP or UDP.
1507  * ACTION:
1508  * The first not void action should be QUEUE.
1511  * The next not void action must be END.
1512  * UDP/TCP pattern example:
1513  * ITEM       Spec               Mask
1514  * ETH        NULL               NULL
1515  * IPV4       src_addr 1.2.3.6   0xFFFFFFFF
1516  *            dst_addr 1.2.3.5   0xFFFFFFFF
1517  * UDP/TCP    src_port 80        0xFFFF
1518  *            dst_port 80        0xFFFF
1519  * END
1520  * Other members in mask and spec should be set to 0x00.
1521  * Item->last should be NULL.
1522  */
1523 static int
1524 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1525                                const struct rte_flow_item pattern[],
1526                                const struct rte_flow_action actions[],
1527                                struct hinic_fdir_rule *rule,
1528                                struct rte_flow_error *error)
1529 {
1530         const struct rte_flow_item *item = NULL;
1531
1532         if (hinic_check_filter_arg(attr, pattern, actions, error))
1533                 return -rte_errno;
1534
1535         if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1536                 return -rte_errno;
1537
1538         if (hinic_check_normal_attr_ele(attr, rule, error))
1539                 return -rte_errno;
1540
1541         if (hinic_check_normal_act_ele(item, actions, rule, error))
1542                 return -rte_errno;
1543
1544         return 0;
1545 }
1546
1547 /**
1548  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1549  * and collect the flow director filter info along the way.
1550  * UDP/TCP/SCTP PATTERN:
1551  * The first not void item can be ETH or IPV4 or IPV6
1552  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1553  * The next not void item must be UDP.
1554  * The next not void item may be VXLAN (optional).
1556  * The next not void inner item can be ANY, UDP or TCP (optional).
1557  * The next not void item must be END.
1558  * ACTION:
1559  * The first not void action should be QUEUE.
1562  * The next not void action must be END.
1563  * UDP/TCP pattern example:
1564  * ITEM       Spec               Mask
1565  * ETH        NULL               NULL
1566  * IPV4       src_addr 1.2.3.6   0xFFFFFFFF
1567  *            dst_addr 1.2.3.5   0xFFFFFFFF
1568  * UDP        NULL               NULL
1569  * VXLAN      NULL               NULL
1570  * UDP/TCP    src_port 80        0xFFFF
1571  *            dst_port 80        0xFFFF
1572  * END
1573  * Other members in mask and spec should be set to 0x00.
1574  * Item->last should be NULL.
1575  */
1576 static int
1577 hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
1578                                const struct rte_flow_item pattern[],
1579                                const struct rte_flow_action actions[],
1580                                struct hinic_fdir_rule *rule,
1581                                struct rte_flow_error *error)
1582 {
1583         const struct rte_flow_item *item = NULL;
1584
1585         if (hinic_check_filter_arg(attr, pattern, actions, error))
1586                 return -rte_errno;
1587
1588         if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1589                 return -rte_errno;
1590
1591         if (hinic_check_normal_attr_ele(attr, rule, error))
1592                 return -rte_errno;
1593
1594         if (hinic_check_normal_act_ele(item, actions, rule, error))
1595                 return -rte_errno;
1596
1597         return 0;
1598 }
1599
1600 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1601                         const struct rte_flow_attr *attr,
1602                         const struct rte_flow_item pattern[],
1603                         const struct rte_flow_action actions[],
1604                         struct hinic_fdir_rule *rule,
1605                         struct rte_flow_error *error)
1606 {
1607         int ret;
1608
1609         ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1610                                                 rule, error);
1611         if (!ret)
1612                 goto step_next;
1613
1614         ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1615                                                 rule, error);
1616         if (!ret)
1617                 goto step_next;
1618
1619         ret = hinic_parse_fdir_filter_tcam_tunnel(attr, pattern, actions,
1620                                                 rule, error);
1621         if (ret)
1622                 return ret;
1623
1624 step_next:
1625         if (rule->queue >= dev->data->nb_rx_queues)
1626                 return -ENOTSUP;
1627
1628         return ret;
1629 }
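/*
 * Editor's note: the cascade above tries the exact-match (normal) parser
 * first and falls back to the TCAM parsers only when it fails; the final
 * queue bound check applies to whichever parser succeeded. A hedged sketch
 * of reaching it through the generic API (port id 0 is assumed):
 */
#if 0
static int example_validate_flow(const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[])
{
        struct rte_flow_error error;

        /* Dispatches to hinic_flow_validate() via the driver's flow ops. */
        return rte_flow_validate(0, attr, pattern, actions, &error);
}
#endif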
1630
1631 /**
1632  * Check if the flow rule is supported by the NIC.
1633  * It only checks the format; it does not guarantee that the rule can be
1634  * programmed into the HW, because there may not be enough room for it.
1635  */
1636 static int hinic_flow_validate(struct rte_eth_dev *dev,
1637                                 const struct rte_flow_attr *attr,
1638                                 const struct rte_flow_item pattern[],
1639                                 const struct rte_flow_action actions[],
1640                                 struct rte_flow_error *error)
1641 {
1642         struct rte_eth_ethertype_filter ethertype_filter;
1643         struct rte_eth_ntuple_filter ntuple_filter;
1644         struct hinic_fdir_rule fdir_rule;
1645         int ret;
1646
1647         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1648         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1649                                 actions, &ntuple_filter, error);
1650         if (!ret)
1651                 return 0;
1652
1653         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1654         ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1655                                 actions, &ethertype_filter, error);
1656
1657         if (!ret)
1658                 return 0;
1659
1660         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1661         ret = hinic_parse_fdir_filter(dev, attr, pattern,
1662                                 actions, &fdir_rule, error);
1663
1664         return ret;
1665 }
1666
1667 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1668                  struct hinic_5tuple_filter_info *hinic_filter_info)
1669 {
1670         switch (filter->dst_ip_mask) {
1671         case UINT32_MAX:
1672                 hinic_filter_info->dst_ip_mask = 0;
1673                 hinic_filter_info->dst_ip = filter->dst_ip;
1674                 break;
1675         case 0:
1676                 hinic_filter_info->dst_ip_mask = 1;
1677                 hinic_filter_info->dst_ip = 0;
1678                 break;
1679         default:
1680                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1681                 return -EINVAL;
1682         }
1683
1684         switch (filter->src_ip_mask) {
1685         case UINT32_MAX:
1686                 hinic_filter_info->src_ip_mask = 0;
1687                 hinic_filter_info->src_ip = filter->src_ip;
1688                 break;
1689         case 0:
1690                 hinic_filter_info->src_ip_mask = 1;
1691                 hinic_filter_info->src_ip = 0;
1692                 break;
1693         default:
1694                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1695                 return -EINVAL;
1696         }
1697         return 0;
1698 }
1699
1700 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1701                    struct hinic_5tuple_filter_info *hinic_filter_info)
1702 {
1703         switch (filter->dst_port_mask) {
1704         case UINT16_MAX:
1705                 hinic_filter_info->dst_port_mask = 0;
1706                 hinic_filter_info->dst_port = filter->dst_port;
1707                 break;
1708         case 0:
1709                 hinic_filter_info->dst_port_mask = 1;
1710                 hinic_filter_info->dst_port = 0;
1711                 break;
1712         default:
1713                 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1714                 return -EINVAL;
1715         }
1716
1717         switch (filter->src_port_mask) {
1718         case UINT16_MAX:
1719                 hinic_filter_info->src_port_mask = 0;
1720                 hinic_filter_info->src_port = filter->src_port;
1721                 break;
1722         case 0:
1723                 hinic_filter_info->src_port_mask = 1;
1724                 hinic_filter_info->src_port = 0;
1725                 break;
1726         default:
1727                 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1728                 return -EINVAL;
1729         }
1730
1731         return 0;
1732 }
1733
1734 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1735                     struct hinic_5tuple_filter_info *hinic_filter_info)
1736 {
1737         switch (filter->proto_mask) {
1738         case UINT8_MAX:
1739                 hinic_filter_info->proto_mask = 0;
1740                 hinic_filter_info->proto = filter->proto;
1741                 break;
1742         case 0:
1743                 hinic_filter_info->proto_mask = 1;
1744                 hinic_filter_info->proto = 0;
1745                 break;
1746         default:
1747                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1748                 return -EINVAL;
1749         }
1750
1751         return 0;
1752 }
1753
1754 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1755                         struct hinic_5tuple_filter_info *filter_info)
1756 {
1757         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1758                 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1759                 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1760                 return -EINVAL;
1761
1762         if (ntuple_ip_filter(filter, filter_info) ||
1763                 ntuple_port_filter(filter, filter_info) ||
1764                 ntuple_proto_filter(filter, filter_info))
1765                 return -EINVAL;
1766
1767         filter_info->priority = (uint8_t)filter->priority;
1768         return 0;
1769 }
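/*
 * Editor's illustration: note the inverted mask convention above -- an
 * rte_eth mask of UINT*_MAX maps to a hinic *_mask flag of 0 ("compare the
 * field"), while 0 maps to 1 ("ignore it"). A hedged sketch with invented
 * values (RTE_IPV4() from rte_ip.h is assumed):
 */
#if 0
static void example_ntuple_to_5tuple(void)
{
        struct rte_eth_ntuple_filter f = {
                .flags = RTE_5TUPLE_FLAGS,
                .dst_ip = RTE_IPV4(192, 168, 0, 1),
                .dst_ip_mask = UINT32_MAX,      /* exact match */
                .dst_port = RTE_BE16(179),
                .dst_port_mask = UINT16_MAX,    /* exact match */
                .priority = HINIC_MIN_N_TUPLE_PRIO,
        };
        struct hinic_5tuple_filter_info info;

        (void)ntuple_filter_to_5tuple(&f, &info);
        /* info.dst_ip_mask == 0 (compare), info.src_ip_mask == 1 (ignore) */
}
#endif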
1770
1771 static inline struct hinic_5tuple_filter *
1772 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1773                            struct hinic_5tuple_filter_info *key)
1774 {
1775         struct hinic_5tuple_filter *it;
1776
1777         TAILQ_FOREACH(it, filter_list, entries) {
1778                 if (memcmp(key, &it->filter_info,
1779                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1780                         return it;
1781                 }
1782         }
1783
1784         return NULL;
1785 }
1786
1787 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1788 {
1789         struct tag_pa_rule lacp_rule;
1790         struct tag_pa_action lacp_action;
1791
1792         memset(&lacp_rule, 0, sizeof(lacp_rule));
1793         memset(&lacp_action, 0, sizeof(lacp_action));
1794         /* LACP TCAM rule */
1795         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1796         lacp_rule.l2_header.eth_type.val16 = 0x8809;
1797         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1798
1799         /* LACP TCAM action */
1800         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1801         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1802         lacp_action.pkt_type = PKT_LACP_TYPE;
1803         lacp_action.pri = 0x0;
1804         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1805
1806         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1807                                         &lacp_rule, &lacp_action);
1808 }
1809
1810 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1811 {
1812         struct tag_pa_rule bgp_rule;
1813         struct tag_pa_action bgp_action;
1814
1815         memset(&bgp_rule, 0, sizeof(bgp_rule));
1816         memset(&bgp_action, 0, sizeof(bgp_action));
1817         /* BGP TCAM rule */
1818         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1819         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1820         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1821         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1822         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1823         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1824
1825         /* BGP TCAM action */
1826         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1827         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1828         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1829         bgp_action.pri = 0xf; /* BGP pri is 0xf, taken from the ipsu
1830                                * parse result; no need to convert
1831                                */
1832         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1833
1834         return hinic_set_fdir_tcam(nic_dev->hwdev,
1835                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1836 }
1837
1838 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1839 {
1840         struct tag_pa_rule bgp_rule;
1841         struct tag_pa_action bgp_action;
1842
1843         memset(&bgp_rule, 0, sizeof(bgp_rule));
1844         memset(&bgp_action, 0, sizeof(bgp_action));
1845         /* BGP TCAM rule */
1846         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1847         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1848         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1849         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1850         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1851         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1852
1853         /* BGP TCAM action */
1854         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1855         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1856         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1857         bgp_action.pri = 0xf; /* BGP pri is 0xf, taken from the ipsu
1858                                * parse result; no need to convert
1859                                */
1860         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1861
1862         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1863                                         &bgp_rule, &bgp_action);
1864 }
1865
1866 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1867 {
1868         struct tag_pa_rule vrrp_rule;
1869         struct tag_pa_action vrrp_action;
1870
1871         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1872         memset(&vrrp_action, 0, sizeof(vrrp_action));
1873         /* VRRP TCAM rule */
1874         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1875         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1876         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1877         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1878
1879         /* VRRP TCAM action */
1880         vrrp_action.err_type = 0x3f;
1881         vrrp_action.fwd_action = 0x7;
1882         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1883         vrrp_action.pri = 0xf;
1884         vrrp_action.push_len = 0xf;
1885
1886         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1887                                         &vrrp_rule, &vrrp_action);
1888 }
1889
1890 /**
1891  *  Clear all fdir configuration.
1892  *
1893  * @param nic_dev
1894  *   The hardware interface of an Ethernet device.
1899  */
1900 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1901 {
1902         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1903
1904         (void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
1905
1906         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1907
1908         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1909
1910         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1911
1912         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1913
1914         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
1915 }
1916
1917 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1918                        struct hinic_filter_info *filter_info)
1919 {
1920         switch (filter->filter_info.proto) {
1921         case IPPROTO_TCP:
1922                 /* Filter type is bgp type if dst_port or src_port is 179 */
1923                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1924                         !(filter->filter_info.dst_port_mask)) {
1925                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1926                 } else if (filter->filter_info.src_port ==
1927                         RTE_BE16(BGP_DPORT_ID) &&
1928                         !(filter->filter_info.src_port_mask)) {
1929                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1930                 } else {
1931                         PMD_DRV_LOG(INFO, "TCP protocol: 5-tuple filters"
1932                         " only support BGP now, proto: 0x%x, "
1933                         "dst_port: 0x%x, dst_port_mask: 0x%x, "
1934                         "src_port: 0x%x, src_port_mask: 0x%x.",
1935                         filter->filter_info.proto,
1936                         filter->filter_info.dst_port,
1937                         filter->filter_info.dst_port_mask,
1938                         filter->filter_info.src_port,
1939                         filter->filter_info.src_port_mask);
1940                         return -EINVAL;
1941                 }
1942                 break;
1943
1944         case IPPROTO_VRRP:
1945                 filter_info->pkt_type = PKT_VRRP_TYPE;
1946                 break;
1947
1948         case IPPROTO_ICMP:
1949                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1950                 break;
1951
1952         case IPPROTO_ICMPV6:
1953                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1954                 break;
1955
1956         default:
1957                 PMD_DRV_LOG(ERR, "5-tuple filters only support BGP/VRRP/ICMP now, "
1958                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x, "
1959                 "src_port: 0x%x, src_port_mask: 0x%x.",
1960                 filter->filter_info.proto, filter->filter_info.dst_port,
1961                 filter->filter_info.dst_port_mask,
1962                 filter->filter_info.src_port,
1963                 filter->filter_info.src_port_mask);
1964                 return -EINVAL;
1965         }
1966
1967         return 0;
1968 }
1969
1970 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1971                         struct hinic_filter_info *filter_info, int *index)
1972 {
1973         int type_id;
1974
1975         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1976
1977         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1978                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter types.");
1979                 return -EINVAL;
1980         }
1981
1982         if (!(filter_info->type_mask & (1 << type_id))) {
1983                 filter_info->type_mask |= 1 << type_id;
1984                 filter->index = type_id;
1985                 filter_info->pkt_filters[type_id].enable = true;
1986                 filter_info->pkt_filters[type_id].pkt_proto =
1987                                                 filter->filter_info.proto;
1988                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1989                                   filter, entries);
1990         } else {
1991                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1992                 return -EIO;
1993         }
1994
1995         *index = type_id;
1996         return 0;
1997 }
1998
1999 /*
2000  * Add a 5tuple filter
2001  *
2002  * @param dev:
2003  *  Pointer to struct rte_eth_dev.
2004  * @param filter:
2005  *  Pointer to the filter that will be added.
2006  * @return
2007  *    - On success, zero.
2008  *    - On failure, a negative value.
2009  */
2010 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
2011                                 struct hinic_5tuple_filter *filter)
2012 {
2013         struct hinic_filter_info *filter_info =
2014                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2015         int i, ret_fw;
2016         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2017
2018         if (hinic_filter_info_init(filter, filter_info) ||
2019                 hinic_lookup_new_filter(filter, filter_info, &i))
2020                 return -EFAULT;
2021
2022         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2023                                         filter_info->qid,
2024                                         filter_info->pkt_filters[i].enable,
2025                                         true);
2026         if (ret_fw) {
2027                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2028                         filter_info->pkt_type, filter->queue,
2029                         filter_info->pkt_filters[i].enable);
2030                 return -EFAULT;
2031         }
2032
2033         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2034                         filter_info->pkt_type, filter_info->qid,
2035                         filter_info->pkt_filters[filter->index].enable);
2036
2037         switch (filter->filter_info.proto) {
2038         case IPPROTO_TCP:
2039                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
2040                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
2041                         if (ret_fw) {
2042                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
2043                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
2044                                         filter_info->pkt_type, filter->queue,
2045                                         filter_info->pkt_filters[i].enable);
2046                                 return -EFAULT;
2047                         }
2048
2049                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
2050                                 filter->queue,
2051                                 filter_info->pkt_filters[i].enable);
2052                 } else if (filter->filter_info.src_port ==
2053                         RTE_BE16(BGP_DPORT_ID)) {
2054                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
2055                         if (ret_fw) {
2056                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
2057                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
2058                                         filter_info->pkt_type, filter->queue,
2059                                         filter_info->pkt_filters[i].enable);
2060                                 return -EFAULT;
2061                         }
2062
2063                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
2064                                         filter->queue,
2065                                         filter_info->pkt_filters[i].enable);
2066                 }
2067
2068                 break;
2069
2070         case IPPROTO_VRRP:
2071                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
2072                 if (ret_fw) {
2073                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
2074                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
2075                                 filter_info->pkt_type, filter->queue,
2076                                 filter_info->pkt_filters[i].enable);
2077                         return -EFAULT;
2078                 }
2079                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
2080                                 filter->queue,
2081                                 filter_info->pkt_filters[i].enable);
2082                 break;
2083
2084         default:
2085                 break;
2086         }
2087
2088         return 0;
2089 }
2090
2091 /*
2092  * Remove a 5tuple filter
2093  *
2094  * @param dev
2095  *  Pointer to struct rte_eth_dev.
2096  * @param filter
2097  *  Pointer to the filter to be removed.
2098  */
2099 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2100                            struct hinic_5tuple_filter *filter)
2101 {
2102         struct hinic_filter_info *filter_info =
2103                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2104         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2105
2106         switch (filter->filter_info.proto) {
2107         case IPPROTO_VRRP:
2108                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2109                 break;
2110
2111         case IPPROTO_TCP:
2112                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2113                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2114                                                         TCAM_PKT_BGP_DPORT);
2115                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2116                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2117                                                         TCAM_PKT_BGP_SPORT);
2118                 break;
2119
2120         default:
2121                 break;
2122         }
2123
2124         hinic_filter_info_init(filter, filter_info);
2125
2126         filter_info->pkt_filters[filter->index].enable = false;
2127         filter_info->pkt_filters[filter->index].pkt_proto = 0;
2128
2129         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2130                 filter_info->pkt_type,
2131                 filter_info->pkt_filters[filter->index].qid,
2132                 filter_info->pkt_filters[filter->index].enable);
2133         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2134                                 filter_info->pkt_filters[filter->index].qid,
2135                                 filter_info->pkt_filters[filter->index].enable,
2136                                 true);
2137
2138         filter_info->pkt_type = 0;
2139         filter_info->qid = 0;
2140         filter_info->pkt_filters[filter->index].qid = 0;
2141         filter_info->type_mask &= ~(1 << (filter->index));
2142         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2143
2144         rte_free(filter);
2145 }
2146
2147 /*
2148  * Add or delete a ntuple filter
2149  *
2150  * @param dev
2151  *  Pointer to struct rte_eth_dev.
2152  * @param ntuple_filter
2153  *  Pointer to struct rte_eth_ntuple_filter
2154  * @param add
2155  *  If true, add filter; if false, remove filter
2156  * @return
2157  *    - On success, zero.
2158  *    - On failure, a negative value.
2159  */
2160 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2161                                 struct rte_eth_ntuple_filter *ntuple_filter,
2162                                 bool add)
2163 {
2164         struct hinic_filter_info *filter_info =
2165                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2166         struct hinic_5tuple_filter_info filter_5tuple;
2167         struct hinic_5tuple_filter *filter;
2168         int ret;
2169
2170         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2171                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2172                 return -EINVAL;
2173         }
2174
2175         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2176         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2177         if (ret < 0)
2178                 return ret;
2179
2180         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2181                                          &filter_5tuple);
2182         if (filter != NULL && add) {
2183                 PMD_DRV_LOG(ERR, "Filter exists.");
2184                 return -EEXIST;
2185         }
2186         if (filter == NULL && !add) {
2187                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2188                 return -ENOENT;
2189         }
2190
2191         if (add) {
2192                 filter = rte_zmalloc("hinic_5tuple_filter",
2193                                 sizeof(struct hinic_5tuple_filter), 0);
2194                 if (filter == NULL)
2195                         return -ENOMEM;
2196                 rte_memcpy(&filter->filter_info, &filter_5tuple,
2197                                 sizeof(struct hinic_5tuple_filter_info));
2198                 filter->queue = ntuple_filter->queue;
2199
2200                 filter_info->qid = ntuple_filter->queue;
2201
2202                 ret = hinic_add_5tuple_filter(dev, filter);
2203                 if (ret)
2204                         rte_free(filter);
2205
2206                 return ret;
2207         }
2208
2209         hinic_remove_5tuple_filter(dev, filter);
2210
2211         return 0;
2212 }
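/*
 * Editor's sketch (hedged): round-tripping a 5-tuple filter through the
 * add/del entry point above; "dev" and "f" are assumed to come from the
 * caller.
 */
#if 0
static void example_ntuple_round_trip(struct rte_eth_dev *dev,
                                      struct rte_eth_ntuple_filter *f)
{
        if (hinic_add_del_ntuple_filter(dev, f, true) == 0)
                (void)hinic_add_del_ntuple_filter(dev, f, false);
}
#endif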
2213
2214 static inline int
2215 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2216 {
2217         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2218                 return -EINVAL;
2219
2220         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2221                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2222                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2223                         " ethertype filter", filter->ether_type);
2224                 return -EINVAL;
2225         }
2226
2227         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2228                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2229                 return -EINVAL;
2230         }
2231         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2232                 PMD_DRV_LOG(ERR, "Drop option is not supported");
2233                 return -EINVAL;
2234         }
2235
2236         return 0;
2237 }
2238
2239 static inline int
2240 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2241                               struct hinic_pkt_filter *ethertype_filter)
2242 {
2243         switch (ethertype_filter->pkt_proto) {
2244         case RTE_ETHER_TYPE_SLOW:
2245                 filter_info->pkt_type = PKT_LACP_TYPE;
2246                 break;
2247
2248         case RTE_ETHER_TYPE_ARP:
2249                 filter_info->pkt_type = PKT_ARP_TYPE;
2250                 break;
2251
2252         default:
2253                 PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
2254                 return -EIO;
2255         }
2256
2257         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2258 }
2259
2260 static inline int
2261 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2262                               struct hinic_pkt_filter *ethertype_filter)
2263 {
2264         int id;
2265
2266         /* Find LACP or VRRP type id */
2267         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2268         if (id < 0)
2269                 return -EINVAL;
2270
2271         if (!(filter_info->type_mask & (1 << id))) {
2272                 filter_info->type_mask |= 1 << id;
2273                 filter_info->pkt_filters[id].pkt_proto =
2274                         ethertype_filter->pkt_proto;
2275                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2276                 filter_info->qid = ethertype_filter->qid;
2277                 return id;
2278         }
2279
2280         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2281         return -EINVAL;
2282 }
2283
2284 static inline void
2285 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2286                               uint8_t idx)
2287 {
2288         if (idx >= HINIC_MAX_Q_FILTERS)
2289                 return;
2290
2291         filter_info->pkt_type = 0;
2292         filter_info->type_mask &= ~(1 << idx);
2293         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2294         filter_info->pkt_filters[idx].enable = FALSE;
2295         filter_info->pkt_filters[idx].qid = 0;
2296 }
2297
2298 static inline int
2299 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2300                                struct rte_eth_ethertype_filter *filter,
2301                                bool add)
2302 {
2303         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2304         struct hinic_filter_info *filter_info =
2305                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2306         struct hinic_pkt_filter ethertype_filter;
2307         int i;
2308         int ret_fw;
2309
2310         if (hinic_check_ethertype_filter(filter))
2311                 return -EINVAL;
2312
2313         if (add) {
2314                 ethertype_filter.pkt_proto = filter->ether_type;
2315                 ethertype_filter.enable = TRUE;
2316                 ethertype_filter.qid = (u8)filter->queue;
2317                 i = hinic_ethertype_filter_insert(filter_info,
2318                                                     &ethertype_filter);
2319                 if (i < 0)
2320                         return -ENOSPC;
2321
2322                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2323                                 filter_info->pkt_type, filter_info->qid,
2324                                 filter_info->pkt_filters[i].enable, true);
2325                 if (ret_fw) {
2326                         PMD_DRV_LOG(ERR, "Add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2327                                 filter_info->pkt_type, filter->queue,
2328                                 filter_info->pkt_filters[i].enable);
2329
2330                         hinic_ethertype_filter_remove(filter_info, i);
2331                         return -ENOENT;
2332                 }
2333                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2334                                 filter_info->pkt_type, filter->queue,
2335                                 filter_info->pkt_filters[i].enable);
2336
2337                 switch (ethertype_filter.pkt_proto) {
2338                 case RTE_ETHER_TYPE_SLOW:
2339                         ret_fw = hinic_set_lacp_tcam(nic_dev);
2340                         if (ret_fw) {
2341                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2342                                 hinic_ethertype_filter_remove(filter_info, i);
2343                                 return -ENOENT;
2344                         }
2345
2346                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2347                         break;
2348                 default:
2349                         break;
2350                 }
2351         } else {
2352                 ethertype_filter.pkt_proto = filter->ether_type;
2353                 i = hinic_ethertype_filter_lookup(filter_info,
2354                                                 &ethertype_filter);
2355                 if (i < 0)
2356                         return -EINVAL;
2357
2358                 if ((filter_info->type_mask & (1 << i))) {
2359                         filter_info->pkt_filters[i].enable = FALSE;
2360                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
2361                                         filter_info->pkt_type,
2362                                         filter_info->pkt_filters[i].qid,
2363                                         filter_info->pkt_filters[i].enable,
2364                                         true);
2365
2366                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2367                                         filter_info->pkt_type,
2368                                         filter_info->pkt_filters[i].qid,
2369                                         filter_info->pkt_filters[i].enable);
2370
2371                         switch (ethertype_filter.pkt_proto) {
2372                         case RTE_ETHER_TYPE_SLOW:
2373                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2374                                                                 TCAM_PKT_LACP);
2375                                 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2376                                 break;
2377                         default:
2378                                 break;
2379                         }
2380
2381                         hinic_ethertype_filter_remove(filter_info, i);
2382
2383                 } else {
2384                         PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2385                                         filter_info->pkt_type, filter->queue,
2386                                         filter_info->pkt_filters[i].enable);
2387                         return -ENOENT;
2388                 }
2389         }
2390
2391         return 0;
2392 }
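/*
 * Editor's illustration (hedged): only LACP (RTE_ETHER_TYPE_SLOW, 0x8809)
 * and ARP ethertype filters pass the checks above. A minimal add request
 * steering ARP to queue 0:
 */
#if 0
static void example_add_arp_ethertype_filter(struct rte_eth_dev *dev)
{
        struct rte_eth_ethertype_filter f = {
                .ether_type = RTE_ETHER_TYPE_ARP,
                .flags = 0,             /* no MAC compare, no drop */
                .queue = 0,
        };

        (void)hinic_add_del_ethertype_filter(dev, &f, true);
}
#endif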
2393
2394 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2395                                 struct hinic_fdir_info *fdir_info)
2396 {
2397         switch (rule->mask.src_ipv4_mask) {
2398         case UINT32_MAX:
2399                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2400                 fdir_info->qid = rule->queue;
2401                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2402                 return 0;
2403
2404         case 0:
2405                 break;
2406
2407         default:
2408                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2409                 return -EINVAL;
2410         }
2411
2412         switch (rule->mask.dst_ipv4_mask) {
2413         case UINT32_MAX:
2414                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2415                 fdir_info->qid = rule->queue;
2416                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2417                 return 0;
2418
2419         case 0:
2420                 break;
2421
2422         default:
2423                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2424                 return -EINVAL;
2425         }
2426
2427         if (fdir_info->fdir_flag == 0) {
2428                 PMD_DRV_LOG(ERR, "All supported masks are empty.");
2429                 return -EINVAL;
2430         }
2431
2432         return 0;
2433 }
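/*
 * Editor's illustration: the normal fdir path supports exactly one exact
 * match, source ip or destination ip, per rule; the 32-bit address itself
 * becomes the lookup key. A hedged sketch with invented values (RTE_IPV4()
 * from rte_ip.h is assumed):
 */
#if 0
static void example_fdir_info(void)
{
        struct hinic_fdir_rule rule = { 0 };
        struct hinic_fdir_info info = { 0 };

        rule.mask.dst_ipv4_mask = UINT32_MAX;   /* exact dst ip match */
        rule.hinic_fdir.dst_ip = RTE_IPV4(10, 0, 0, 2);
        rule.queue = 1;

        (void)hinic_fdir_info_init(&rule, &info);
        /* info.fdir_flag == HINIC_ATR_FLOW_TYPE_IPV4_DIP,
         * info.fdir_key == rule.hinic_fdir.dst_ip, info.qid == 1
         */
}
#endif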
2434
2435 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2436                                         struct hinic_fdir_rule *rule, bool add)
2437 {
2438         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2439         struct hinic_fdir_info fdir_info;
2440         int ret;
2441
2442         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2443
2444         ret = hinic_fdir_info_init(rule, &fdir_info);
2445         if (ret) {
2446                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2447                 return ret;
2448         }
2449
2450         if (add) {
2451                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2452                                                 true, fdir_info.fdir_key,
2453                                                 true, fdir_info.fdir_flag);
2454                 if (ret) {
2455                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2456                                         fdir_info.fdir_flag, fdir_info.qid,
2457                                         fdir_info.fdir_key);
2458                         return -ENOENT;
2459                 }
2460                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2461                                 fdir_info.fdir_flag, fdir_info.qid,
2462                                 fdir_info.fdir_key);
2463         } else {
2464                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2465                                                 false, fdir_info.fdir_key, true,
2466                                                 fdir_info.fdir_flag);
2467                 if (ret) {
2468                         PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2469                                 fdir_info.fdir_flag, fdir_info.qid,
2470                                 fdir_info.fdir_key);
2471                         return -ENOENT;
2472                 }
2473                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2474                                 fdir_info.fdir_flag, fdir_info.qid,
2475                                 fdir_info.fdir_key);
2476         }
2477
2478         return 0;
2479 }
2480
2481 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2482 {
2483         u8 idx;
2484
2485         for (idx = 0; idx < len; idx++)
2486                 key_y[idx] = src_input[idx] & mask[idx];
2487 }
2488
2489 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2490 {
2491         u8 idx;
2492
2493         for (idx = 0; idx < len; idx++)
2494                 key_x[idx] = key_y[idx] ^ mask[idx];
2495 }
2496
2497 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2498                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2499 {
2500         tcam_translate_key_y(fdir_tcam_rule->key.y,
2501                 (u8 *)(&tcam_key->key_info),
2502                 (u8 *)(&tcam_key->key_mask),
2503                 TCAM_FLOW_KEY_SIZE);
2504         tcam_translate_key_x(fdir_tcam_rule->key.x,
2505                 fdir_tcam_rule->key.y,
2506                 (u8 *)(&tcam_key->key_mask),
2507                 TCAM_FLOW_KEY_SIZE);
2508 }
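/*
 * Editor's note: the TCAM consumes each key as an (x, y) bit pair, derived
 * above as y = value & mask and x = y ^ mask. A masked-out bit (mask = 0)
 * therefore encodes as x = 0, y = 0 ("don't care"), while a compared bit b
 * encodes as y = b, x = ~b. Worked single-byte example (values invented):
 */
#if 0
static void example_tcam_xy(void)
{
        u8 value = 0xA5, mask = 0x0F;
        u8 y = value & mask;    /* 0x05: low nibble kept, high nibble zeroed */
        u8 x = y ^ mask;        /* 0x0A: complement of the compared bits */

        (void)x;
}
#endif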
2509
2510 static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
2511                                      struct hinic_fdir_rule *rule,
2512                                      struct tag_tcam_key *tcam_key)
2513 {
2514         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2515
2516         switch (rule->mask.dst_ipv4_mask) {
2517         case UINT32_MAX:
2518                 tcam_key->key_info.ext_dip_h =
2519                         (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2520                 tcam_key->key_info.ext_dip_l =
2521                         rule->hinic_fdir.dst_ip & 0xffffU;
2522                 tcam_key->key_mask.ext_dip_h =
2523                         (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2524                 tcam_key->key_mask.ext_dip_l =
2525                         rule->mask.dst_ipv4_mask & 0xffffU;
2526                 break;
2527
2528         case 0:
2529                 break;
2530
2531         default:
2532                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2533                 return -EINVAL;
2534         }
2535
2536         if (rule->mask.dst_port_mask > 0) {
2537                 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2538                 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2539         }
2540
2541         if (rule->mask.src_port_mask > 0) {
2542                 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2543                 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2544         }
2545
2546         switch (rule->mask.tunnel_flag) {
2547         case UINT16_MAX:
2548                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2549                 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2550                 break;
2551
2552         case 0:
2553                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2554                 tcam_key->key_mask.tunnel_flag = 0;
2555                 break;
2556
2557         default:
2558                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2559                 return -EINVAL;
2560         }
2561
2562         if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2563                 tcam_key->key_info.dst_port =
2564                                         rule->hinic_fdir.tunnel_inner_dst_port;
2565                 tcam_key->key_mask.dst_port =
2566                                         rule->mask.tunnel_inner_dst_port_mask;
2567         }
2568
2569         if (rule->mask.tunnel_inner_src_port_mask > 0) {
2570                 tcam_key->key_info.src_port =
2571                                         rule->hinic_fdir.tunnel_inner_src_port;
2572                 tcam_key->key_mask.src_port =
2573                                         rule->mask.tunnel_inner_src_port_mask;
2574         }
2575
2576         switch (rule->mask.proto_mask) {
2577         case UINT16_MAX:
2578                 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2579                 tcam_key->key_mask.protocol = UINT8_MAX;
2580                 break;
2581
2582         case 0:
2583                 break;
2584
2585         default:
2586                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2587                 return -EINVAL;
2588         }
2589
2590         tcam_key->key_mask.function_id = UINT16_MAX;
2591         tcam_key->key_info.function_id =
2592                 hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
2593
2594         return 0;
2595 }
2596
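/*
 * IPv6 keys: the 16-byte destination address is packed big-endian into
 * eight 16-bit key words, ipv6_keyN = (dst_ipv6[2N] << 8) | dst_ipv6[2N + 1].
 * Only an all-or-nothing match on the full address is supported here.
 */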
2597 static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
2598                                      struct hinic_fdir_rule *rule,
2599                                      struct tag_tcam_key *tcam_key)
2600 {
2601         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2602
2603         switch (rule->mask.dst_ipv6_mask) {
2604         case UINT16_MAX:
2605                 tcam_key->key_info_ipv6.ipv6_key0 =
2606                         ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
2607                         rule->hinic_fdir.dst_ipv6[1];
2608                 tcam_key->key_info_ipv6.ipv6_key1 =
2609                         ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
2610                         rule->hinic_fdir.dst_ipv6[3];
2611                 tcam_key->key_info_ipv6.ipv6_key2 =
2612                         ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
2613                         rule->hinic_fdir.dst_ipv6[5];
2614                 tcam_key->key_info_ipv6.ipv6_key3 =
2615                         ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
2616                         rule->hinic_fdir.dst_ipv6[7];
2617                 tcam_key->key_info_ipv6.ipv6_key4 =
2618                         ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
2619                         rule->hinic_fdir.dst_ipv6[9];
2620                 tcam_key->key_info_ipv6.ipv6_key5 =
2621                         ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
2622                         rule->hinic_fdir.dst_ipv6[11];
2623                 tcam_key->key_info_ipv6.ipv6_key6 =
2624                         ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
2625                         rule->hinic_fdir.dst_ipv6[13];
2626                 tcam_key->key_info_ipv6.ipv6_key7 =
2627                         ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
2628                         rule->hinic_fdir.dst_ipv6[15];
2629                 tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
2630                 tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
2631                 tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
2632                 tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
2633                 tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
2634                 tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
2635                 tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
2636                 tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
2637                 break;
2638
2639         case 0:
2640                 break;
2641
2642         default:
2643                 PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
2644                 return -EINVAL;
2645         }
2646
2647         if (rule->mask.dst_port_mask > 0) {
2648                 tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
2649                 tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
2650         }
2651
2652         switch (rule->mask.proto_mask) {
2653         case UINT16_MAX:
2654                 tcam_key->key_info_ipv6.protocol =
2655                         (rule->hinic_fdir.proto) & 0x7F;
2656                 tcam_key->key_mask_ipv6.protocol = 0x7F;
2657                 break;
2658
2659         case 0:
2660                 break;
2661
2662         default:
2663                 PMD_DRV_LOG(ERR, "invalid protocol mask");
2664                 return -EINVAL;
2665         }
2666
2667         tcam_key->key_info_ipv6.ipv6_flag = 1;
2668         tcam_key->key_mask_ipv6.ipv6_flag = 1;
2669
2670         tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
2671         tcam_key->key_info_ipv6.function_id =
2672                         (u8)hinic_global_func_id(nic_dev->hwdev);
2673
2674         return 0;
2675 }
2676
2677 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2678                                      struct hinic_fdir_rule *rule,
2679                                      struct tag_tcam_key *tcam_key,
2680                                      struct tag_tcam_cfg_rule *fdir_tcam_rule)
2681 {
2682         int ret = -EINVAL;
2683
2684         if (rule->mask.dst_ipv4_mask == UINT32_MAX)
2685                 ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
2686         else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
2687                 ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
2688
2689         if (ret < 0)
2690                 return ret;
2691
2692         fdir_tcam_rule->data.qid = rule->queue;
2693
2694         tcam_key_calculate(tcam_key, fdir_tcam_rule);
2695
2696         return 0;
2697 }
2698
2699 static inline struct hinic_tcam_filter *
2700 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2701                         struct tag_tcam_key *key)
2702 {
2703         struct hinic_tcam_filter *it;
2704
2705         TAILQ_FOREACH(it, filter_list, entries) {
2706                 if (memcmp(key, &it->tcam_key,
2707                         sizeof(struct tag_tcam_key)) == 0) {
2708                         return it;
2709                 }
2710         }
2711
2712         return NULL;
2713 }
2714
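/*
 * Find the first free slot in the per-function index array; a slot value
 * of 0 means free, 1 means occupied. VFs and PFs have different capacities.
 */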
2715 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2716                                         struct hinic_tcam_info *tcam_info,
2717                                         struct hinic_tcam_filter *tcam_filter,
2718                                         u16 *tcam_index)
2719 {
2720         int index;
2721         int max_index;
2722         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2723
2724         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2725                 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2726         else
2727                 max_index = HINIC_PF_MAX_TCAM_FILTERS;
2728
2729         for (index = 0; index < max_index; index++) {
2730                 if (tcam_info->tcam_index_array[index] == 0)
2731                         break;
2732         }
2733
2734         if (index == max_index) {
2735                 PMD_DRV_LOG(ERR, "Function 0x%x supports at most %d tcam filter rules",
2736                         hinic_global_func_id(nic_dev->hwdev), max_index);
2737                 return -EINVAL;
2738         }
2739
2740         tcam_filter->index = index;
2741         *tcam_index = index;
2742
2743         return 0;
2744 }
2745
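/*
 * Adding the first rule also allocates a tcam block for this function and
 * turns the fdir and tcam filter switches on; later rules reuse the block.
 */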
2746 static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
2747                                 struct hinic_tcam_filter *tcam_filter,
2748                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2749 {
2750         struct hinic_tcam_info *tcam_info =
2751                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2752         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2753         u16 index = 0;
2754         u16 tcam_block_index = 0;
2755         int rc;
2756
2757         if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
2758                 return -EINVAL;
2759
2760         if (tcam_info->tcam_rule_nums == 0) {
2761                 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2762                         rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2763                                 HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
2764                         if (rc != 0) {
2765                                 PMD_DRV_LOG(ERR, "Failed to alloc VF fdir filter tcam block!");
2766                                 return -EFAULT;
2767                         }
2768                 } else {
2769                         rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2770                                 HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
2771                         if (rc != 0) {
2772                                 PMD_DRV_LOG(ERR, "Failed to alloc PF fdir filter tcam block!");
2773                                 return -EFAULT;
2774                         }
2775                 }
2776
2777                 tcam_info->tcam_block_index = tcam_block_index;
2778         } else {
2779                 tcam_block_index = tcam_info->tcam_block_index;
2780         }
2781
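        /*
         * Translate the per-block slot into a global hardware rule index:
         * VF blocks live in a dedicated index window, while PF blocks are
         * laid out linearly at block_index * HINIC_PF_MAX_TCAM_FILTERS.
         */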
2782         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2783                 fdir_tcam_rule->index =
2784                         HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
2785         } else {
2786                 fdir_tcam_rule->index =
2787                         tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
2788         }
2789
2790         rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
2791         if (rc != 0) {
2792                 PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
2793                 return -EFAULT;
2794         }
2795
2796         PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x, "
2797                 "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeeded",
2798                 hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
2799                 fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
2800                 tcam_info->tcam_rule_nums + 1);
2801
2802         if (tcam_info->tcam_rule_nums == 0) {
2803                 rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
2804                 if (rc < 0) {
2805                         (void)hinic_del_tcam_rule(nic_dev->hwdev,
2806                                                 fdir_tcam_rule->index);
2807                         return rc;
2808                 }
2809
2810                 rc = hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true);
2811                 if (rc && rc != HINIC_MGMT_CMD_UNSUPPORTED) {
2812                         /*
2813                          * hinic supports two filter methods: a linear table
2814                          * and a tcam table. If enabling the tcam filter fails
2815                          * but the linear table still works, the filter must
2816                          * stay enabled, so do not close the fdir switch here.
2817                          */
2818                         (void)hinic_del_tcam_rule(nic_dev->hwdev,
2819                                                 fdir_tcam_rule->index);
2820                         return rc;
2821                 }
2822         }
2823
2824         TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
2825
2826         tcam_info->tcam_index_array[index] = 1;
2827         tcam_info->tcam_rule_nums++;
2828
2829         return 0;
2830 }
2831
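/*
 * Remove one tcam rule from hardware and from the local list; when the
 * last rule goes away the tcam block itself is returned to the pool.
 */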
2832 static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
2833                                 struct hinic_tcam_filter *tcam_filter)
2834 {
2835         struct hinic_tcam_info *tcam_info =
2836                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2837         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2838         u32 index = 0;
2839         u16 tcam_block_index = tcam_info->tcam_block_index;
2840         int rc;
2841         u8 block_type = 0;
2842
2843         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2844                 index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
2845                         tcam_filter->index;
2846                 block_type = HINIC_TCAM_BLOCK_TYPE_VF;
2847         } else {
2848                 index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
2849                         tcam_filter->index;
2850                 block_type = HINIC_TCAM_BLOCK_TYPE_PF;
2851         }
2852
2853         rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
2854         if (rc != 0) {
2855                 PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
2856                 return -EFAULT;
2857         }
2858
2859         PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
2860                 "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeeded",
2861                 hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
2862                 tcam_info->tcam_rule_nums - 1);
2863
2864         TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
2865
2866         tcam_info->tcam_index_array[tcam_filter->index] = 0;
2867
2868         rte_free(tcam_filter);
2869
2870         tcam_info->tcam_rule_nums--;
2871
2872         if (tcam_info->tcam_rule_nums == 0) {
2873                 (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
2874                                         &tcam_block_index);
2875         }
2876
2877         return 0;
2878 }
2879
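/*
 * Add (add == true) or delete (add == false) one tcam fdir filter. The
 * calculated tcam key doubles as the duplicate-detection key, so adding an
 * existing rule fails with -EEXIST and deleting a missing one with -ENOENT.
 */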
2880 static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
2881                                         struct hinic_fdir_rule *rule, bool add)
2882 {
2883         struct hinic_tcam_info *tcam_info =
2884                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2885         struct hinic_tcam_filter *tcam_filter;
2886         struct tag_tcam_cfg_rule fdir_tcam_rule;
2887         struct tag_tcam_key tcam_key;
2888         int ret;
2889
2890         memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
2891         memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
2892
2893         ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
2894         if (ret) {
2895                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2896                 return ret;
2897         }
2898
2899         tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
2900                                                 &tcam_key);
2901         if (tcam_filter != NULL && add) {
2902                 PMD_DRV_LOG(ERR, "Filter exists.");
2903                 return -EEXIST;
2904         }
2905         if (tcam_filter == NULL && !add) {
2906                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2907                 return -ENOENT;
2908         }
2909
2910         if (add) {
2911                 tcam_filter = rte_zmalloc("hinic_tcam_filter",
2912                                 sizeof(struct hinic_tcam_filter), 0);
2913                 if (tcam_filter == NULL)
2914                         return -ENOMEM;
2915                 (void)rte_memcpy(&tcam_filter->tcam_key,
2916                                  &tcam_key, sizeof(struct tag_tcam_key));
2917                 tcam_filter->queue = fdir_tcam_rule.data.qid;
2918
2919                 ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
2920                 if (ret < 0) {
2921                         rte_free(tcam_filter);
2922                         return ret;
2923                 }
2924
2925                 rule->tcam_index = fdir_tcam_rule.index;
2926
2927         } else {
2928                 PMD_DRV_LOG(INFO, "Begin to delete tcam filter");
2929                 ret = hinic_del_tcam_filter(dev, tcam_filter);
2930                 if (ret < 0)
2931                         return ret;
2932         }
2933
2934         return 0;
2935 }
2936
2937 /**
2938  * Create a flow rule.
2939  * Theoretically one rule can match more than one filter.
2940  * We let it use the first filter it hits, so the matching
2941  * sequence (ntuple, ethertype, then fdir) matters.
2942  */
2943 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2944                                         const struct rte_flow_attr *attr,
2945                                         const struct rte_flow_item pattern[],
2946                                         const struct rte_flow_action actions[],
2947                                         struct rte_flow_error *error)
2948 {
2949         int ret;
2950         struct rte_eth_ntuple_filter ntuple_filter;
2951         struct rte_eth_ethertype_filter ethertype_filter;
2952         struct hinic_fdir_rule fdir_rule;
2953         struct rte_flow *flow = NULL;
2954         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2955         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2956         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2957         struct hinic_flow_mem *hinic_flow_mem_ptr;
2958         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2959
2960         flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2961         if (!flow) {
2962                 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2963                 return NULL;
2964         }
2965
2966         hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2967                         sizeof(struct hinic_flow_mem), 0);
2968         if (!hinic_flow_mem_ptr) {
2969                 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2970                 rte_free(flow);
2971                 return NULL;
2972         }
2973
2974         hinic_flow_mem_ptr->flow = flow;
2975         TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2976                                 entries);
2977
2978         /* Add ntuple filter */
2979         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2980         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2981                         actions, &ntuple_filter, error);
2982         if (!ret) {
2983                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2984                 if (!ret) {
2985                         ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2986                                 sizeof(struct hinic_ntuple_filter_ele), 0);
2987                         if (ntuple_filter_ptr == NULL) {
2988                                 PMD_DRV_LOG(ERR, "Failed to allocate ntuple_filter_ptr");
2989                                 (void)hinic_add_del_ntuple_filter(dev,
2990                                                         &ntuple_filter, FALSE);
2991                                 goto out;
2992                         }
2993                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2994                                    &ntuple_filter,
2995                                    sizeof(struct rte_eth_ntuple_filter));
2996                         TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2997                                 ntuple_filter_ptr, entries);
2998                         flow->rule = ntuple_filter_ptr;
2999                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3000
3001                         PMD_DRV_LOG(INFO, "Create flow ntuple succeeded, func_id: 0x%x",
3002                                 hinic_global_func_id(nic_dev->hwdev));
3003                         return flow;
3004                 }
3005                 goto out;
3006         }
3007
3008         /* Add ethertype filter */
3009         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3010         ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
3011                                         &ethertype_filter, error);
3012         if (!ret) {
3013                 ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
3014                                                      TRUE);
3015                 if (!ret) {
3016                         ethertype_filter_ptr =
3017                                 rte_zmalloc("hinic_ethertype_filter",
3018                                 sizeof(struct hinic_ethertype_filter_ele), 0);
3019                         if (ethertype_filter_ptr == NULL) {
3020                                 PMD_DRV_LOG(ERR, "Failed to allocate ethertype_filter_ptr");
3021                                 (void)hinic_add_del_ethertype_filter(dev,
3022                                                 &ethertype_filter, FALSE);
3023                                 goto out;
3024                         }
3025                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3026                                 &ethertype_filter,
3027                                 sizeof(struct rte_eth_ethertype_filter));
3028                         TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
3029                                 ethertype_filter_ptr, entries);
3030                         flow->rule = ethertype_filter_ptr;
3031                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3032
3033                         PMD_DRV_LOG(INFO, "Create flow ethertype succeeded, func_id: 0x%x",
3034                                         hinic_global_func_id(nic_dev->hwdev));
3035                         return flow;
3036                 }
3037                 goto out;
3038         }
3039
3040         /* Add fdir filter */
3041         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
3042         ret = hinic_parse_fdir_filter(dev, attr, pattern,
3043                                       actions, &fdir_rule, error);
3044         if (!ret) {
3045                 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3046                         ret = hinic_add_del_fdir_filter(dev, &fdir_rule, TRUE);
3047                 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3048                         ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
3049                                                              TRUE);
3050                 } else {
3051                         PMD_DRV_LOG(ERR, "Flow fdir rule create failed, invalid rule mode");
3052                         goto out;
3053                 }
3054                 if (!ret) {
3055                         fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
3056                                 sizeof(struct hinic_fdir_rule_ele), 0);
3057                         if (fdir_rule_ptr == NULL) {
3058                                 PMD_DRV_LOG(ERR, "Failed to allocate fdir_rule_ptr");
3059                                 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL)
3060                                         hinic_add_del_fdir_filter(dev,
3061                                                 &fdir_rule, FALSE);
3062                                 else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM)
3063                                         hinic_add_del_tcam_fdir_filter(dev,
3064                                                 &fdir_rule, FALSE);
3065
3066                                 goto out;
3067                         }
3068                         rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
3069                                 sizeof(struct hinic_fdir_rule));
3070                         TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
3071                                 fdir_rule_ptr, entries);
3072                         flow->rule = fdir_rule_ptr;
3073                         flow->filter_type = RTE_ETH_FILTER_FDIR;
3074
3075                         PMD_DRV_LOG(INFO, "Create flow fdir rule succeeded, func_id: 0x%x",
3076                                         hinic_global_func_id(nic_dev->hwdev));
3077                         return flow;
3078                 }
3079                 goto out;
3080         }
3081
3082 out:
3083         TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
3084         rte_flow_error_set(error, ret ? -ret : EINVAL,
3085                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3086                            "Failed to create flow.");
3087         rte_free(hinic_flow_mem_ptr);
3088         rte_free(flow);
3089         return NULL;
3090 }
3091
3092 /* Destroy a flow rule on hinic. */
3093 static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
3094                                 struct rte_flow_error *error)
3095 {
3096         int ret;
3097         struct rte_flow *pmd_flow = flow;
3098         enum rte_filter_type filter_type = pmd_flow->filter_type;
3099         struct rte_eth_ntuple_filter ntuple_filter;
3100         struct rte_eth_ethertype_filter ethertype_filter;
3101         struct hinic_fdir_rule fdir_rule;
3102         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3103         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3104         struct hinic_fdir_rule_ele *fdir_rule_ptr;
3105         struct hinic_flow_mem *hinic_flow_mem_ptr;
3106         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3107
3108         switch (filter_type) {
3109         case RTE_ETH_FILTER_NTUPLE:
3110                 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
3111                                         pmd_flow->rule;
3112                 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
3113                         sizeof(struct rte_eth_ntuple_filter));
3114                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3115                 if (!ret) {
3116                         TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
3117                                 ntuple_filter_ptr, entries);
3118                         rte_free(ntuple_filter_ptr);
3119                 }
3120                 break;
3121         case RTE_ETH_FILTER_ETHERTYPE:
3122                 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
3123                                         pmd_flow->rule;
3124                 rte_memcpy(&ethertype_filter,
3125                         &ethertype_filter_ptr->filter_info,
3126                         sizeof(struct rte_eth_ethertype_filter));
3127                 ret = hinic_add_del_ethertype_filter(dev,
3128                                 &ethertype_filter, FALSE);
3129                 if (!ret) {
3130                         TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3131                                 ethertype_filter_ptr, entries);
3132                         rte_free(ethertype_filter_ptr);
3133                 }
3134                 break;
3135         case RTE_ETH_FILTER_FDIR:
3136                 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
3137                 rte_memcpy(&fdir_rule,
3138                         &fdir_rule_ptr->filter_info,
3139                         sizeof(struct hinic_fdir_rule));
3140                 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3141                         ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
3142                 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3143                         ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
3144                                                                 FALSE);
3145                 } else {
3146                         PMD_DRV_LOG(ERR, "FDIR filter rule mode is wrong!");
3147                         ret = -EINVAL;
3148                 }
3149                 if (!ret) {
3150                         TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
3151                                 fdir_rule_ptr, entries);
3152                         rte_free(fdir_rule_ptr);
3153                 }
3154                 break;
3155         default:
3156                 PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
3157                         filter_type);
3158                 ret = -EINVAL;
3159                 break;
3160         }
3161
3162         if (ret) {
3163                 rte_flow_error_set(error, EINVAL,
3164                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3165                                 NULL, "Failed to destroy flow");
3166                 return ret;
3167         }
3168
3169         TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
3170                 if (hinic_flow_mem_ptr->flow == pmd_flow) {
3171                         TAILQ_REMOVE(&nic_dev->hinic_flow_list,
3172                                 hinic_flow_mem_ptr, entries);
3173                         rte_free(hinic_flow_mem_ptr);
3174                         break;
3175                 }
3176         }
3177         rte_free(flow);
3178
3179         PMD_DRV_LOG(INFO, "Destroy flow succeeded, func_id: 0x%x",
3180                         hinic_global_func_id(nic_dev->hwdev));
3181
3182         return ret;
3183 }
3184
3185 /* Remove all the n-tuple filters */
3186 static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
3187 {
3188         struct hinic_filter_info *filter_info =
3189                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3190         struct hinic_5tuple_filter *p_5tuple;
3191
3192         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
3193                 hinic_remove_5tuple_filter(dev, p_5tuple);
3194 }
3195
3196 /* Remove all the ether type filters */
3197 static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
3198 {
3199         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3200         struct hinic_filter_info *filter_info =
3201                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
3202         int ret = 0;
3203
3204         if (filter_info->type_mask &
3205                 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
3206                 hinic_ethertype_filter_remove(filter_info,
3207                         HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
3208                 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
3209                                         filter_info->qid, false, true);
3210
3211                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
3212         }
3213
3214         if (filter_info->type_mask &
3215                 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
3216                 hinic_ethertype_filter_remove(filter_info,
3217                         HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
3218                 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
3219                         filter_info->qid, false, true);
3220         }
3221
3222         if (ret)
3223                 PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
3224                                 filter_info->pkt_type);
3225 }
3226
3227 /* Remove all the flow director filters */
3228 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
3229 {
3230         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3231         struct hinic_tcam_info *tcam_info =
3232                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
3233         struct hinic_tcam_filter *tcam_filter_ptr;
3234
3235         while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
3236                 (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
3237
3238         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
3239
3240         (void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
3241
3242         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
3243 }
3244
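/*
 * Free the bookkeeping lists only; the hardware state is expected to be
 * cleared beforehand by the hinic_clear_all_*_filter() helpers.
 */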
3245 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
3246 {
3247         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3248         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3249         struct hinic_fdir_rule_ele *fdir_rule_ptr;
3250         struct hinic_flow_mem *hinic_flow_mem_ptr;
3251         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3252
3253         while ((ntuple_filter_ptr =
3254                         TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
3255                 TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
3256                                  entries);
3257                 rte_free(ntuple_filter_ptr);
3258         }
3259
3260         while ((ethertype_filter_ptr =
3261                         TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
3262                 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3263                                 ethertype_filter_ptr,
3264                                 entries);
3265                 rte_free(ethertype_filter_ptr);
3266         }
3267
3268         while ((fdir_rule_ptr =
3269                         TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
3270                 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
3271                                  entries);
3272                 rte_free(fdir_rule_ptr);
3273         }
3274
3275         while ((hinic_flow_mem_ptr =
3276                         TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
3277                 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
3278                                  entries);
3279                 rte_free(hinic_flow_mem_ptr->flow);
3280                 rte_free(hinic_flow_mem_ptr);
3281         }
3282 }
3283
3284 /* Destroy all flow rules associated with a port on hinic. */
3285 static int hinic_flow_flush(struct rte_eth_dev *dev,
3286                                 __rte_unused struct rte_flow_error *error)
3287 {
3288         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3289
3290         hinic_clear_all_ntuple_filter(dev);
3291         hinic_clear_all_ethertype_filter(dev);
3292         hinic_clear_all_fdir_filter(dev);
3293         hinic_filterlist_flush(dev);
3294
3295         PMD_DRV_LOG(INFO, "Flush flow succeeded, func_id: 0x%x",
3296                         hinic_global_func_id(nic_dev->hwdev));
3297         return 0;
3298 }
3299
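/*
 * Non-rte_flow teardown entry point: performs the same cleanup as
 * hinic_flow_flush() but without an rte_flow_error argument.
 */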
3300 void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
3301 {
3302         hinic_clear_all_ntuple_filter(dev);
3303         hinic_clear_all_ethertype_filter(dev);
3304         hinic_clear_all_fdir_filter(dev);
3305         hinic_filterlist_flush(dev);
3306 }
3307
3308 const struct rte_flow_ops hinic_flow_ops = {
3309         .validate = hinic_flow_validate,
3310         .create = hinic_flow_create,
3311         .destroy = hinic_flow_destroy,
3312         .flush = hinic_flow_flush,
3313 };
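/*
 * These callbacks are reached through the generic rte_flow API rather
 * than called directly. A minimal usage sketch (the attr, pattern and
 * actions arrays are hypothetical placeholders, error handling elided):
 *
 *      struct rte_flow_error err;
 *      struct rte_flow *flow;
 *
 *      flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *      if (flow != NULL)
 *              rte_flow_destroy(port_id, flow, &err);
 *
 * The ethdev layer looks up hinic_flow_ops through the driver and then
 * dispatches to hinic_flow_create() above.
 */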
3314