net/bnxt: fix using RSS config struct
[dpdk.git] / drivers / net / hinic / hinic_pmd_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow.h>
17 #include <rte_flow_driver.h>
18 #include "base/hinic_compat.h"
19 #include "base/hinic_pmd_hwdev.h"
20 #include "base/hinic_pmd_hwif.h"
21 #include "base/hinic_pmd_wq.h"
22 #include "base/hinic_pmd_cmdq.h"
23 #include "base/hinic_pmd_niccfg.h"
24 #include "hinic_pmd_ethdev.h"
25
26 #define HINIC_MAX_RX_QUEUE_NUM          64
27
28 #ifndef UINT8_MAX
29 #define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
30 #define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
31 #define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
32 #define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
33 #define ASCII_MAX          (0x7F)
34 #endif
35
36 /* IPSURX MACRO */
37 #define PA_ETH_TYPE_ROCE                0
38 #define PA_ETH_TYPE_IPV4                1
39 #define PA_ETH_TYPE_IPV6                2
40 #define PA_ETH_TYPE_OTHER               3
41
42 #define PA_IP_PROTOCOL_TYPE_TCP         1
43 #define PA_IP_PROTOCOL_TYPE_UDP         2
44 #define PA_IP_PROTOCOL_TYPE_ICMP        3
45 #define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
46 #define PA_IP_PROTOCOL_TYPE_SCTP        5
47 #define PA_IP_PROTOCOL_TYPE_VRRP        112
48
49 #define IP_HEADER_PROTOCOL_TYPE_TCP     6
50 #define IP_HEADER_PROTOCOL_TYPE_UDP     17
51 #define IP_HEADER_PROTOCOL_TYPE_ICMP    1
52 #define IP_HEADER_PROTOCOL_TYPE_ICMPV6  58
53
54 #define FDIR_TCAM_NORMAL_PACKET         0
55 #define FDIR_TCAM_TUNNEL_PACKET         1
56
57 #define HINIC_MIN_N_TUPLE_PRIO          1
58 #define HINIC_MAX_N_TUPLE_PRIO          7
59
60 /* TCAM type mask in hardware */
61 #define TCAM_PKT_BGP_SPORT      1
62 #define TCAM_PKT_VRRP           2
63 #define TCAM_PKT_BGP_DPORT      3
64 #define TCAM_PKT_LACP           4
65
66 #define TCAM_DIP_IPV4_TYPE      0
67 #define TCAM_DIP_IPV6_TYPE      1
68
69 #define BGP_DPORT_ID            179
70 #define IPPROTO_VRRP            112
71
72 /* Packet type defined in hardware to perform filter */
73 #define PKT_IGMP_IPV4_TYPE     64
74 #define PKT_ICMP_IPV4_TYPE     65
75 #define PKT_ICMP_IPV6_TYPE     66
76 #define PKT_ICMP_IPV6RS_TYPE   67
77 #define PKT_ICMP_IPV6RA_TYPE   68
78 #define PKT_ICMP_IPV6NS_TYPE   69
79 #define PKT_ICMP_IPV6NA_TYPE   70
80 #define PKT_ICMP_IPV6RE_TYPE   71
81 #define PKT_DHCP_IPV4_TYPE     72
82 #define PKT_DHCP_IPV6_TYPE     73
83 #define PKT_LACP_TYPE          74
84 #define PKT_ARP_REQ_TYPE       79
85 #define PKT_ARP_REP_TYPE       80
86 #define PKT_ARP_TYPE           81
87 #define PKT_BGPD_DPORT_TYPE    83
88 #define PKT_BGPD_SPORT_TYPE    84
89 #define PKT_VRRP_TYPE          85
90
91 #define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
92         (&((struct hinic_nic_dev *)nic_dev)->filter)
93
94 #define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
95         (&((struct hinic_nic_dev *)nic_dev)->tcam)
96
97
98 enum hinic_atr_flow_type {
99         HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
100         HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
101         HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
102         HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
103 };
104
105 /* Structure to store fdir's info. */
106 struct hinic_fdir_info {
107         uint8_t fdir_flag;
108         uint8_t qid;
109         uint32_t fdir_key;
110 };
111
112 /**
113  * Endless loop will never happen with below assumption
114  * 1. there is at least one no-void item(END)
115  * 2. cur is before END.
116  */
117 static inline const struct rte_flow_item *
118 next_no_void_pattern(const struct rte_flow_item pattern[],
119                 const struct rte_flow_item *cur)
120 {
121         const struct rte_flow_item *next =
122                 cur ? cur + 1 : &pattern[0];
123         while (1) {
124                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
125                         return next;
126                 next++;
127         }
128 }
129
130 static inline const struct rte_flow_action *
131 next_no_void_action(const struct rte_flow_action actions[],
132                 const struct rte_flow_action *cur)
133 {
134         const struct rte_flow_action *next =
135                 cur ? cur + 1 : &actions[0];
136         while (1) {
137                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
138                         return next;
139                 next++;
140         }
141 }
142
143 static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
144                                         struct rte_flow_error *error)
145 {
146         /* Must be input direction */
147         if (!attr->ingress) {
148                 rte_flow_error_set(error, EINVAL,
149                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
150                         attr, "Only support ingress.");
151                 return -rte_errno;
152         }
153
154         if (attr->egress) {
155                 rte_flow_error_set(error, EINVAL,
156                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
157                                 attr, "Not support egress.");
158                 return -rte_errno;
159         }
160
161         if (attr->priority) {
162                 rte_flow_error_set(error, EINVAL,
163                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
164                                 attr, "Not support priority.");
165                 return -rte_errno;
166         }
167
168         if (attr->group) {
169                 rte_flow_error_set(error, EINVAL,
170                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
171                                 attr, "Not support group.");
172                 return -rte_errno;
173         }
174
175         return 0;
176 }
177
178 static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
179                                 const struct rte_flow_item *pattern,
180                                 const struct rte_flow_action *actions,
181                                 struct rte_flow_error *error)
182 {
183         if (!pattern) {
184                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
185                                 NULL, "NULL pattern.");
186                 return -rte_errno;
187         }
188
189         if (!actions) {
190                 rte_flow_error_set(error, EINVAL,
191                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
192                                 NULL, "NULL action.");
193                 return -rte_errno;
194         }
195
196         if (!attr) {
197                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
198                                    NULL, "NULL attribute.");
199                 return -rte_errno;
200         }
201
202         return 0;
203 }
204
205 static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
206                                         struct rte_flow_error *error)
207 {
208         /* The first non-void item should be MAC */
209         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
210                 rte_flow_error_set(error, EINVAL,
211                         RTE_FLOW_ERROR_TYPE_ITEM,
212                         item, "Not supported by ethertype filter");
213                 return -rte_errno;
214         }
215
216         /* Not supported last point for range */
217         if (item->last) {
218                 rte_flow_error_set(error, EINVAL,
219                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
220                         item, "Not supported last point for range");
221                 return -rte_errno;
222         }
223
224         /* Get the MAC info. */
225         if (!item->spec || !item->mask) {
226                 rte_flow_error_set(error, EINVAL,
227                                 RTE_FLOW_ERROR_TYPE_ITEM,
228                                 item, "Not supported by ethertype filter");
229                 return -rte_errno;
230         }
231         return 0;
232 }
233
234 static int
235 hinic_parse_ethertype_aciton(const struct rte_flow_action *actions,
236                         const struct rte_flow_action *act,
237                         const struct rte_flow_action_queue *act_q,
238                         struct rte_eth_ethertype_filter *filter,
239                         struct rte_flow_error *error)
240 {
241         /* Parse action */
242         act = next_no_void_action(actions, NULL);
243         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
244                 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
245                 rte_flow_error_set(error, EINVAL,
246                                 RTE_FLOW_ERROR_TYPE_ACTION,
247                                 act, "Not supported action.");
248                 return -rte_errno;
249         }
250
251         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
252                 act_q = (const struct rte_flow_action_queue *)act->conf;
253                 filter->queue = act_q->index;
254         } else {
255                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
256         }
257
258         /* Check if the next non-void item is END */
259         act = next_no_void_action(actions, act);
260         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
261                 rte_flow_error_set(error, EINVAL,
262                                 RTE_FLOW_ERROR_TYPE_ACTION,
263                                 act, "Not supported action.");
264                 return -rte_errno;
265         }
266
267         return 0;
268 }
269
270 /**
271  * Parse the rule to see if it is a ethertype rule.
272  * And get the ethertype filter info BTW.
273  * pattern:
274  * The first not void item can be ETH.
275  * The next not void item must be END.
276  * action:
277  * The first not void action should be QUEUE.
278  * The next not void action should be END.
279  * pattern example:
280  * ITEM         Spec                    Mask
281  * ETH          type    0x0807          0xFFFF
282  * END
283  * other members in mask and spec should set to 0x00.
284  * item->last should be NULL.
285  */
286 static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
287                         const struct rte_flow_item *pattern,
288                         const struct rte_flow_action *actions,
289                         struct rte_eth_ethertype_filter *filter,
290                         struct rte_flow_error *error)
291 {
292         const struct rte_flow_item *item;
293         const struct rte_flow_action *act = NULL;
294         const struct rte_flow_item_eth *eth_spec;
295         const struct rte_flow_item_eth *eth_mask;
296         const struct rte_flow_action_queue *act_q = NULL;
297
298         if (hinic_check_filter_arg(attr, pattern, actions, error))
299                 return -rte_errno;
300
301         item = next_no_void_pattern(pattern, NULL);
302         if (hinic_check_ethertype_first_item(item, error))
303                 return -rte_errno;
304
305         eth_spec = (const struct rte_flow_item_eth *)item->spec;
306         eth_mask = (const struct rte_flow_item_eth *)item->mask;
307
308         /*
309          * Mask bits of source MAC address must be full of 0.
310          * Mask bits of destination MAC address must be full
311          * of 1 or full of 0.
312          */
313         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
314             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
315              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
316                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
317                                 item, "Invalid ether address mask");
318                 return -rte_errno;
319         }
320
321         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
322                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
323                                 item, "Invalid ethertype mask");
324                 return -rte_errno;
325         }
326
327         /*
328          * If mask bits of destination MAC address
329          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
330          */
331         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
332                 filter->mac_addr = eth_spec->dst;
333                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
334         } else {
335                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
336         }
337         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
338
339         /* Check if the next non-void item is END. */
340         item = next_no_void_pattern(pattern, item);
341         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
342                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
343                         item, "Not supported by ethertype filter.");
344                 return -rte_errno;
345         }
346
347         if (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error))
348                 return -rte_errno;
349
350         if (hinic_check_ethertype_attr_ele(attr, error))
351                 return -rte_errno;
352
353         return 0;
354 }
355
356 static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
357                         const struct rte_flow_attr *attr,
358                         const struct rte_flow_item pattern[],
359                         const struct rte_flow_action actions[],
360                         struct rte_eth_ethertype_filter *filter,
361                         struct rte_flow_error *error)
362 {
363         if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
364                 return -rte_errno;
365
366         /* NIC doesn't support MAC address. */
367         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
368                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
369                 rte_flow_error_set(error, EINVAL,
370                         RTE_FLOW_ERROR_TYPE_ITEM,
371                         NULL, "Not supported by ethertype filter");
372                 return -rte_errno;
373         }
374
375         if (filter->queue >= dev->data->nb_rx_queues) {
376                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
377                 rte_flow_error_set(error, EINVAL,
378                         RTE_FLOW_ERROR_TYPE_ITEM,
379                         NULL, "Queue index much too big");
380                 return -rte_errno;
381         }
382
383         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
384                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
385                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
386                 rte_flow_error_set(error, EINVAL,
387                         RTE_FLOW_ERROR_TYPE_ITEM,
388                         NULL, "IPv4/IPv6 not supported by ethertype filter");
389                 return -rte_errno;
390         }
391
392         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
393                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
394                 rte_flow_error_set(error, EINVAL,
395                         RTE_FLOW_ERROR_TYPE_ITEM,
396                         NULL, "Drop option is unsupported");
397                 return -rte_errno;
398         }
399
400         /* Hinic only support LACP/ARP for ether type */
401         if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
402                 filter->ether_type != RTE_ETHER_TYPE_ARP) {
403                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
404                 rte_flow_error_set(error, EINVAL,
405                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
406                         "only lacp/arp type supported by ethertype filter");
407                 return -rte_errno;
408         }
409
410         return 0;
411 }
412
413 static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
414                                 struct rte_eth_ntuple_filter *filter,
415                                 struct rte_flow_error *error)
416 {
417         /* Must be input direction */
418         if (!attr->ingress) {
419                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
420                 rte_flow_error_set(error, EINVAL,
421                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
422                                    attr, "Only support ingress.");
423                 return -rte_errno;
424         }
425
426         if (attr->egress) {
427                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
428                 rte_flow_error_set(error, EINVAL,
429                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
430                                    attr, "Not support egress.");
431                 return -rte_errno;
432         }
433
434         if (attr->priority > 0xFFFF) {
435                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
436                 rte_flow_error_set(error, EINVAL,
437                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
438                                    attr, "Error priority.");
439                 return -rte_errno;
440         }
441
442         if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
443                     attr->priority > HINIC_MAX_N_TUPLE_PRIO)
444                 filter->priority = 1;
445         else
446                 filter->priority = (uint16_t)attr->priority;
447
448         return 0;
449 }
450
451 static int
452 hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
453                         const struct rte_flow_action actions[],
454                         struct rte_eth_ntuple_filter *filter,
455                         struct rte_flow_error *error)
456 {
457         const struct rte_flow_action *act;
458         /*
459          * n-tuple only supports forwarding,
460          * check if the first not void action is QUEUE.
461          */
462         act = next_no_void_action(actions, NULL);
463         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
464                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
465                 rte_flow_error_set(error, EINVAL,
466                         RTE_FLOW_ERROR_TYPE_ACTION,
467                         act, "Flow action type is not QUEUE.");
468                 return -rte_errno;
469         }
470         filter->queue =
471                 ((const struct rte_flow_action_queue *)act->conf)->index;
472
473         /* Check if the next not void item is END */
474         act = next_no_void_action(actions, act);
475         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
476                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
477                 rte_flow_error_set(error, EINVAL,
478                         RTE_FLOW_ERROR_TYPE_ACTION,
479                         act, "Next not void item is not END.");
480                 return -rte_errno;
481         }
482
483         return 0;
484 }
485
486 static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
487                                         const struct rte_flow_item pattern[],
488                                         struct rte_flow_error *error)
489 {
490         const struct rte_flow_item *item;
491
492         /* The first not void item can be MAC or IPv4 */
493         item = next_no_void_pattern(pattern, NULL);
494
495         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
496                 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
497                 rte_flow_error_set(error, EINVAL,
498                         RTE_FLOW_ERROR_TYPE_ITEM,
499                         item, "Not supported by ntuple filter");
500                 return -rte_errno;
501         }
502
503         /* Skip Ethernet */
504         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
505                 /* Not supported last point for range */
506                 if (item->last) {
507                         rte_flow_error_set(error,
508                                 EINVAL,
509                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
510                                 item, "Not supported last point for range");
511                         return -rte_errno;
512                 }
513                 /* if the first item is MAC, the content should be NULL */
514                 if (item->spec || item->mask) {
515                         rte_flow_error_set(error, EINVAL,
516                                 RTE_FLOW_ERROR_TYPE_ITEM,
517                                 item, "Not supported by ntuple filter");
518                         return -rte_errno;
519                 }
520                 /* check if the next not void item is IPv4 */
521                 item = next_no_void_pattern(pattern, item);
522                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
523                         rte_flow_error_set(error,
524                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
525                                 item, "Not supported by ntuple filter");
526                         return -rte_errno;
527                 }
528         }
529
530         *ipv4_item = item;
531         return 0;
532 }
533
534 static int
535 hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
536                         const struct rte_flow_item pattern[],
537                         struct rte_eth_ntuple_filter *filter,
538                         struct rte_flow_error *error)
539 {
540         const struct rte_flow_item_ipv4 *ipv4_spec;
541         const struct rte_flow_item_ipv4 *ipv4_mask;
542         const struct rte_flow_item *item = *in_out_item;
543
544         /* Get the IPv4 info */
545         if (!item->spec || !item->mask) {
546                 rte_flow_error_set(error, EINVAL,
547                         RTE_FLOW_ERROR_TYPE_ITEM,
548                         item, "Invalid ntuple mask");
549                 return -rte_errno;
550         }
551         /* Not supported last point for range */
552         if (item->last) {
553                 rte_flow_error_set(error, EINVAL,
554                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
555                         item, "Not supported last point for range");
556                 return -rte_errno;
557         }
558
559         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
560         /*
561          * Only support src & dst addresses, protocol,
562          * others should be masked.
563          */
564         if (ipv4_mask->hdr.version_ihl ||
565                 ipv4_mask->hdr.type_of_service ||
566                 ipv4_mask->hdr.total_length ||
567                 ipv4_mask->hdr.packet_id ||
568                 ipv4_mask->hdr.fragment_offset ||
569                 ipv4_mask->hdr.time_to_live ||
570                 ipv4_mask->hdr.hdr_checksum ||
571                 !ipv4_mask->hdr.next_proto_id) {
572                 rte_flow_error_set(error,
573                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
574                         item, "Not supported by ntuple filter");
575                 return -rte_errno;
576         }
577
578         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
579         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
580         filter->proto_mask = ipv4_mask->hdr.next_proto_id;
581
582         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
583         filter->dst_ip = ipv4_spec->hdr.dst_addr;
584         filter->src_ip = ipv4_spec->hdr.src_addr;
585         filter->proto  = ipv4_spec->hdr.next_proto_id;
586
587         /* Get next no void item */
588         *in_out_item = next_no_void_pattern(pattern, item);
589         return 0;
590 }
591
592 static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
593                                 const struct rte_flow_item pattern[],
594                                 struct rte_eth_ntuple_filter *filter,
595                                 struct rte_flow_error *error)
596 {
597         const struct rte_flow_item_tcp *tcp_spec;
598         const struct rte_flow_item_tcp *tcp_mask;
599         const struct rte_flow_item_icmp *icmp_mask;
600         const struct rte_flow_item *item = *in_out_item;
601         u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);
602
603         if (item->type == RTE_FLOW_ITEM_TYPE_END)
604                 return 0;
605
606         /* Get TCP or UDP info */
607         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
608                 (!item->spec || !item->mask)) {
609                 memset(filter, 0, ntuple_filter_size);
610                 rte_flow_error_set(error, EINVAL,
611                         RTE_FLOW_ERROR_TYPE_ITEM,
612                         item, "Invalid ntuple mask");
613                 return -rte_errno;
614         }
615
616         /* Not supported last point for range */
617         if (item->last) {
618                 memset(filter, 0, ntuple_filter_size);
619                 rte_flow_error_set(error, EINVAL,
620                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
621                         item, "Not supported last point for range");
622                 return -rte_errno;
623         }
624
625         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
626                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
627
628                 /*
629                  * Only support src & dst ports, tcp flags,
630                  * others should be masked.
631                  */
632                 if (tcp_mask->hdr.sent_seq ||
633                         tcp_mask->hdr.recv_ack ||
634                         tcp_mask->hdr.data_off ||
635                         tcp_mask->hdr.rx_win ||
636                         tcp_mask->hdr.cksum ||
637                         tcp_mask->hdr.tcp_urp) {
638                         memset(filter, 0, ntuple_filter_size);
639                         rte_flow_error_set(error, EINVAL,
640                                 RTE_FLOW_ERROR_TYPE_ITEM,
641                                 item, "Not supported by ntuple filter");
642                         return -rte_errno;
643                 }
644
645                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
646                 filter->src_port_mask  = tcp_mask->hdr.src_port;
647                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
648                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
649                 } else if (!tcp_mask->hdr.tcp_flags) {
650                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
651                 } else {
652                         memset(filter, 0, ntuple_filter_size);
653                         rte_flow_error_set(error, EINVAL,
654                                 RTE_FLOW_ERROR_TYPE_ITEM,
655                                 item, "Not supported by ntuple filter");
656                         return -rte_errno;
657                 }
658
659                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
660                 filter->dst_port  = tcp_spec->hdr.dst_port;
661                 filter->src_port  = tcp_spec->hdr.src_port;
662                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
663         } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
664                 icmp_mask = (const struct rte_flow_item_icmp *)item->mask;
665
666                 /* ICMP all should be masked. */
667                 if (icmp_mask->hdr.icmp_cksum ||
668                         icmp_mask->hdr.icmp_ident ||
669                         icmp_mask->hdr.icmp_seq_nb ||
670                         icmp_mask->hdr.icmp_type ||
671                         icmp_mask->hdr.icmp_code) {
672                         memset(filter, 0, ntuple_filter_size);
673                         rte_flow_error_set(error, EINVAL,
674                                 RTE_FLOW_ERROR_TYPE_ITEM,
675                                 item, "Not supported by ntuple filter");
676                         return -rte_errno;
677                 }
678         }
679
680         /* Get next no void item */
681         *in_out_item = next_no_void_pattern(pattern, item);
682         return 0;
683 }
684
685 static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
686                                         struct rte_eth_ntuple_filter *filter,
687                                         struct rte_flow_error *error)
688 {
689         /* Check if the next not void item is END */
690         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
691                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
692                 rte_flow_error_set(error, EINVAL,
693                         RTE_FLOW_ERROR_TYPE_ITEM,
694                         item, "Not supported by ntuple filter");
695                 return -rte_errno;
696         }
697         return 0;
698 }
699
700 static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
701                                         const struct rte_flow_item pattern[],
702                                         struct rte_eth_ntuple_filter *filter,
703                                         struct rte_flow_error *error)
704 {
705         if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
706                 hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
707                 hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
708                 hinic_ntuple_item_check_end(item, filter, error))
709                 return -rte_errno;
710
711         return 0;
712 }
713
714 /**
715  * Parse the rule to see if it is a n-tuple rule.
716  * And get the n-tuple filter info BTW.
717  * pattern:
718  * The first not void item can be ETH or IPV4.
719  * The second not void item must be IPV4 if the first one is ETH.
720  * The third not void item must be UDP or TCP.
721  * The next not void item must be END.
722  * action:
723  * The first not void action should be QUEUE.
724  * The next not void action should be END.
725  * pattern example:
726  * ITEM         Spec                    Mask
727  * ETH          NULL                    NULL
728  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
729  *              dst_addr 192.167.3.50   0xFFFFFFFF
730  *              next_proto_id   17      0xFF
731  * UDP/TCP/     src_port        80      0xFFFF
732  * SCTP         dst_port        80      0xFFFF
733  * END
734  * other members in mask and spec should set to 0x00.
735  * item->last should be NULL.
736  * Please aware there's an asumption for all the parsers.
737  * rte_flow_item is using big endian, rte_flow_attr and
738  * rte_flow_action are using CPU order.
739  * Because the pattern is used to describe the packets,
740  * normally the packets should use network order.
741  */
742 static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
743                         const struct rte_flow_item pattern[],
744                         const struct rte_flow_action actions[],
745                         struct rte_eth_ntuple_filter *filter,
746                         struct rte_flow_error *error)
747 {
748         const struct rte_flow_item *item = NULL;
749
750         if (hinic_check_filter_arg(attr, pattern, actions, error))
751                 return -rte_errno;
752
753         if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
754                 return -rte_errno;
755
756         if (hinic_check_ntuple_act_ele(item, actions, filter, error))
757                 return -rte_errno;
758
759         if (hinic_check_ntuple_attr_ele(attr, filter, error))
760                 return -rte_errno;
761
762         return 0;
763 }
764
765 static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
766                         const struct rte_flow_attr *attr,
767                         const struct rte_flow_item pattern[],
768                         const struct rte_flow_action actions[],
769                         struct rte_eth_ntuple_filter *filter,
770                         struct rte_flow_error *error)
771 {
772         int ret;
773
774         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
775         if (ret)
776                 return ret;
777
778         /* Hinic doesn't support tcp flags */
779         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
780                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
781                 rte_flow_error_set(error, EINVAL,
782                                    RTE_FLOW_ERROR_TYPE_ITEM,
783                                    NULL, "Not supported by ntuple filter");
784                 return -rte_errno;
785         }
786
787         /* Hinic doesn't support many priorities */
788         if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
789             filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
790                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
791                 rte_flow_error_set(error, EINVAL,
792                         RTE_FLOW_ERROR_TYPE_ITEM,
793                         NULL, "Priority not supported by ntuple filter");
794                 return -rte_errno;
795         }
796
797         if (filter->queue >= dev->data->nb_rx_queues)
798                 return -rte_errno;
799
800         /* Fixed value for hinic */
801         filter->flags = RTE_5TUPLE_FLAGS;
802         return 0;
803 }
804
805 static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
806                                         const struct rte_flow_item pattern[],
807                                         struct rte_flow_error *error)
808 {
809         const struct rte_flow_item *item;
810
811         /* The first not void item can be MAC or IPv4  or TCP or UDP */
812         item = next_no_void_pattern(pattern, NULL);
813
814         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
815                 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
816                 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
817                 item->type != RTE_FLOW_ITEM_TYPE_UDP) {
818                 rte_flow_error_set(error, EINVAL,
819                         RTE_FLOW_ERROR_TYPE_ITEM, item,
820                         "Not supported by fdir filter,support mac,ipv4,tcp,udp");
821                 return -rte_errno;
822         }
823
824         /* Not supported last point for range */
825         if (item->last) {
826                 rte_flow_error_set(error, EINVAL,
827                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
828                         "Not supported last point for range");
829                 return -rte_errno;
830         }
831
832         /* Skip Ethernet */
833         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
834                 /* All should be masked. */
835                 if (item->spec || item->mask) {
836                         rte_flow_error_set(error, EINVAL,
837                                 RTE_FLOW_ERROR_TYPE_ITEM,
838                                 item, "Not supported by fdir filter,support mac");
839                         return -rte_errno;
840                 }
841                 /* Check if the next not void item is IPv4 */
842                 item = next_no_void_pattern(pattern, item);
843                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
844                         item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
845                         rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
847                                 "Not supported by fdir filter,support mac,ipv4");
848                         return -rte_errno;
849                 }
850         }
851
852         *ip_item = item;
853         return 0;
854 }
855
856 static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
857                                 const struct rte_flow_item pattern[],
858                                 struct hinic_fdir_rule *rule,
859                                 struct rte_flow_error *error)
860 {
861         const struct rte_flow_item_ipv4 *ipv4_spec;
862         const struct rte_flow_item_ipv4 *ipv4_mask;
863         const struct rte_flow_item_ipv6 *ipv6_spec;
864         const struct rte_flow_item_ipv6 *ipv6_mask;
865         const struct rte_flow_item *item = *in_out_item;
866         int i;
867
868         /* Get the IPv4 info */
869         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
870                 /* Not supported last point for range */
871                 if (item->last) {
872                         rte_flow_error_set(error, EINVAL,
873                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
874                                 item, "Not supported last point for range");
875                         return -rte_errno;
876                 }
877
878                 if (!item->mask) {
879                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
880                         rte_flow_error_set(error, EINVAL,
881                                 RTE_FLOW_ERROR_TYPE_ITEM,
882                                 item, "Invalid fdir filter mask");
883                         return -rte_errno;
884                 }
885
886                 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
887                 /*
888                  * Only support src & dst addresses,
889                  * others should be masked.
890                  */
891                 if (ipv4_mask->hdr.version_ihl ||
892                         ipv4_mask->hdr.type_of_service ||
893                         ipv4_mask->hdr.total_length ||
894                         ipv4_mask->hdr.packet_id ||
895                         ipv4_mask->hdr.fragment_offset ||
896                         ipv4_mask->hdr.time_to_live ||
897                         ipv4_mask->hdr.next_proto_id ||
898                         ipv4_mask->hdr.hdr_checksum) {
899                         rte_flow_error_set(error,
900                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
901                                 "Not supported by fdir filter, support src,dst ip");
902                         return -rte_errno;
903                 }
904
905                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
906                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
907                 rule->mode = HINIC_FDIR_MODE_NORMAL;
908
909                 if (item->spec) {
910                         ipv4_spec =
911                                 (const struct rte_flow_item_ipv4 *)item->spec;
912                         rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
913                         rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
914                 }
915
916                 /*
917                  * Check if the next not void item is
918                  * TCP or UDP or END.
919                  */
920                 item = next_no_void_pattern(pattern, item);
921                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
922                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
923                     item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
924                     item->type != RTE_FLOW_ITEM_TYPE_ANY &&
925                     item->type != RTE_FLOW_ITEM_TYPE_END) {
926                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
927                         rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
929                                 "Not supported by fdir filter, support tcp, udp, end");
930                         return -rte_errno;
931                 }
932         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
933                 /* Not supported last point for range */
934                 if (item->last) {
935                         rte_flow_error_set(error, EINVAL,
936                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
937                                 item, "Not supported last point for range");
938                         return -rte_errno;
939                 }
940
941                 if (!item->mask) {
942                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
943                         rte_flow_error_set(error, EINVAL,
944                                 RTE_FLOW_ERROR_TYPE_ITEM,
945                                 item, "Invalid fdir filter mask");
946                         return -rte_errno;
947                 }
948
949                 ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;
950
951                 /* Only support dst addresses,  others should be masked */
952                 if (ipv6_mask->hdr.vtc_flow ||
953                     ipv6_mask->hdr.payload_len ||
954                     ipv6_mask->hdr.proto ||
955                     ipv6_mask->hdr.hop_limits) {
956                         rte_flow_error_set(error, EINVAL,
957                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
958                                 "Not supported by fdir filter, support dst ipv6");
959                         return -rte_errno;
960                 }
961
962                 /* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
963                 for (i = 0; i < 16; i++) {
964                         if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
965                                 rte_flow_error_set(error, EINVAL,
966                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
967                                         "Not supported by fdir filter, do not support src ipv6");
968                                 return -rte_errno;
969                         }
970                 }
971
972                 if (!item->spec) {
973                         rte_flow_error_set(error, EINVAL,
974                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
975                                 "Not supported by fdir filter, ipv6 spec is NULL");
976                         return -rte_errno;
977                 }
978
979                 for (i = 0; i < 16; i++) {
980                         if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
981                                 rule->mask.dst_ipv6_mask |= 1 << i;
982                 }
983
984                 ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
985                 rte_memcpy(rule->hinic_fdir.dst_ipv6,
986                            ipv6_spec->hdr.dst_addr, 16);
987
988                 /*
989                  * Check if the next not void item is TCP or UDP or ICMP.
990                  */
991                 item = next_no_void_pattern(pattern, item);
992                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
993                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
994                     item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
995                     item->type != RTE_FLOW_ITEM_TYPE_ICMP6){
996                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
997                         rte_flow_error_set(error, EINVAL,
998                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
999                                 "Not supported by fdir filter, support tcp, udp, icmp");
1000                         return -rte_errno;
1001                 }
1002         }
1003
1004         *in_out_item = item;
1005         return 0;
1006 }
1007
1008 static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
1009                         __rte_unused const struct rte_flow_item pattern[],
1010                         __rte_unused struct hinic_fdir_rule *rule,
1011                         struct rte_flow_error *error)
1012 {
1013         const struct rte_flow_item *item = *in_out_item;
1014
1015         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1016                 rte_flow_error_set(error, EINVAL,
1017                         RTE_FLOW_ERROR_TYPE_ITEM,
1018                         item, "Not supported by normal fdir filter, not support l4");
1019                 return -rte_errno;
1020         }
1021
1022         return 0;
1023 }
1024
1025
1026 static int hinic_normal_item_check_end(const struct rte_flow_item *item,
1027                                         struct hinic_fdir_rule *rule,
1028                                         struct rte_flow_error *error)
1029 {
1030         /* Check if the next not void item is END */
1031         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1032                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1033                 rte_flow_error_set(error, EINVAL,
1034                         RTE_FLOW_ERROR_TYPE_ITEM,
1035                         item, "Not supported by fdir filter, support end");
1036                 return -rte_errno;
1037         }
1038
1039         return 0;
1040 }
1041
1042 static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
1043                                         const struct rte_flow_item pattern[],
1044                                         struct hinic_fdir_rule *rule,
1045                                         struct rte_flow_error *error)
1046 {
1047         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1048             hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1049             hinic_normal_item_check_l4(&item, pattern, rule, error) ||
1050             hinic_normal_item_check_end(item, rule, error))
1051                 return -rte_errno;
1052
1053         return 0;
1054 }
1055
1056 static int
1057 hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
1058                                 const struct rte_flow_item pattern[],
1059                                 struct hinic_fdir_rule *rule,
1060                                 struct rte_flow_error *error)
1061 {
1062         const struct rte_flow_item *item = *in_out_item;
1063         const struct rte_flow_item_tcp *tcp_spec;
1064         const struct rte_flow_item_tcp *tcp_mask;
1065         const struct rte_flow_item_udp *udp_spec;
1066         const struct rte_flow_item_udp *udp_mask;
1067
1068         if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
1069                 rule->mode = HINIC_FDIR_MODE_TCAM;
1070                 rule->mask.proto_mask = UINT16_MAX;
1071                 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
1072         } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
1073                 rule->mode = HINIC_FDIR_MODE_TCAM;
1074                 rule->mask.proto_mask = UINT16_MAX;
1075                 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
1076         } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
1077                 rule->mode = HINIC_FDIR_MODE_TCAM;
1078         } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1079                 if (!item->mask) {
1080                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1081                         rte_flow_error_set(error, EINVAL,
1082                                 RTE_FLOW_ERROR_TYPE_ITEM,
1083                                 item, "Not supported by fdir filter, support src, dst ports");
1084                         return -rte_errno;
1085                 }
1086
1087                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1088
1089                 /*
1090                  * Only support src & dst ports, tcp flags,
1091                  * others should be masked.
1092                  */
1093                 if (tcp_mask->hdr.sent_seq ||
1094                         tcp_mask->hdr.recv_ack ||
1095                         tcp_mask->hdr.data_off ||
1096                         tcp_mask->hdr.rx_win ||
1097                         tcp_mask->hdr.cksum ||
1098                         tcp_mask->hdr.tcp_urp) {
1099                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1100                         rte_flow_error_set(error, EINVAL,
1101                                 RTE_FLOW_ERROR_TYPE_ITEM,
1102                                 item, "Not supported by fdir normal tcam filter");
1103                         return -rte_errno;
1104                 }
1105
1106                 rule->mode = HINIC_FDIR_MODE_TCAM;
1107                 rule->mask.proto_mask = UINT16_MAX;
1108                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1109                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1110
1111                 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
1112                 if (item->spec) {
1113                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1114                         rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
1115                         rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
1116                 }
1117         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1118                 /*
1119                  * Only care about src & dst ports,
1120                  * others should be masked.
1121                  */
1122                 if (!item->mask) {
1123                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1124                         rte_flow_error_set(error, EINVAL,
1125                                 RTE_FLOW_ERROR_TYPE_ITEM,
1126                                 item, "Not supported by fdir filter, support src, dst ports");
1127                         return -rte_errno;
1128                 }
1129
1130                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1131                 if (udp_mask->hdr.dgram_len ||
1132                         udp_mask->hdr.dgram_cksum) {
1133                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1134                         rte_flow_error_set(error, EINVAL,
1135                                 RTE_FLOW_ERROR_TYPE_ITEM,
1136                                 item, "Not supported by fdir filter, support udp");
1137                         return -rte_errno;
1138                 }
1139
1140                 rule->mode = HINIC_FDIR_MODE_TCAM;
1141                 rule->mask.proto_mask = UINT16_MAX;
1142                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1143                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1144
1145                 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1146                 if (item->spec) {
1147                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1148                         rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
1149                         rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
1150                 }
1151         } else {
1152                 (void)memset(rule,  0, sizeof(struct hinic_fdir_rule));
1153                 rte_flow_error_set(error, EINVAL,
1154                                 RTE_FLOW_ERROR_TYPE_ITEM,
1155                                 item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");
1156                 return -rte_errno;
1157         }
1158
1159         item = next_no_void_pattern(pattern, item);
1160         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1161                 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1162                 rte_flow_error_set(error, EINVAL,
1163                         RTE_FLOW_ERROR_TYPE_ITEM,
1164                         item, "Not supported by fdir filter tcam normal, support end");
1165                 return -rte_errno;
1166         }
1167
1168         /* get next no void item */
1169         *in_out_item = item;
1170
1171         return 0;
1172 }
1173
1174 static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
1175                                         const struct rte_flow_item pattern[],
1176                                         struct hinic_fdir_rule *rule,
1177                                         struct rte_flow_error *error)
1178 {
1179         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1180                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1181                 hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
1182                 hinic_normal_item_check_end(item, rule, error))
1183                 return -rte_errno;
1184
1185         return 0;
1186 }
1187
1188 static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
1189                                         const struct rte_flow_item pattern[],
1190                                         struct hinic_fdir_rule *rule,
1191                                         struct rte_flow_error *error)
1192 {
1193         const struct rte_flow_item *item = *in_out_item;
1194
1195         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1196                 item = next_no_void_pattern(pattern, item);
1197                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1198                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1199                         rte_flow_error_set(error, EINVAL,
1200                                 RTE_FLOW_ERROR_TYPE_ITEM,
1201                                 item, "Not supported by fdir filter, support vxlan");
1202                         return -rte_errno;
1203                 }
1204
1205                 *in_out_item = item;
1206         } else {
1207                 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1208                 rte_flow_error_set(error, EINVAL,
1209                                 RTE_FLOW_ERROR_TYPE_ITEM,
1210                                 item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
1211                 return -rte_errno;
1212         }
1213
1214         return 0;
1215 }
1216
1217 static int
1218 hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
1219                                 const struct rte_flow_item pattern[],
1220                                 struct hinic_fdir_rule *rule,
1221                                 struct rte_flow_error *error)
1222 {
1223         const struct rte_flow_item *item = *in_out_item;
1224
1225
1226         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
1227                 item = next_no_void_pattern(pattern, item);
1228                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1229                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1230                     item->type != RTE_FLOW_ITEM_TYPE_ANY) {
1231                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1232                         rte_flow_error_set(error, EINVAL,
1233                                 RTE_FLOW_ERROR_TYPE_ITEM,
1234                                 item, "Not supported by fdir filter, support tcp/udp");
1235                         return -rte_errno;
1236                 }
1237
1238                 *in_out_item = item;
1239         }
1240
1241         return 0;
1242 }
1243
1244 static int
1245 hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
1246                                 const struct rte_flow_item pattern[],
1247                                 struct hinic_fdir_rule *rule,
1248                                 struct rte_flow_error *error)
1249 {
1250         const struct rte_flow_item_tcp *tcp_spec;
1251         const struct rte_flow_item_tcp *tcp_mask;
1252         const struct rte_flow_item_udp *udp_spec;
1253         const struct rte_flow_item_udp *udp_mask;
1254         const struct rte_flow_item *item = *in_out_item;
1255
1256         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1257                 /* Not supported last point for range */
1258                 if (item->last) {
1259                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
1260                         rte_flow_error_set(error, EINVAL,
1261                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1262                                 item, "Not supported last point for range");
1263                         return -rte_errno;
1264                 }
1265
1266                 /* get the TCP/UDP info */
1267                 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1268                         /*
1269                          * Only care about src & dst ports,
1270                          * others should be masked.
1271                          */
1272                         if (!item->mask) {
1273                                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1274                                 rte_flow_error_set(error, EINVAL,
1275                                         RTE_FLOW_ERROR_TYPE_ITEM,
1276                                         item, "Not supported by fdir filter, support src, dst ports");
1277                                 return -rte_errno;
1278                         }
1279
1280                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1281                         if (tcp_mask->hdr.sent_seq ||
1282                                 tcp_mask->hdr.recv_ack ||
1283                                 tcp_mask->hdr.data_off ||
1284                                 tcp_mask->hdr.tcp_flags ||
1285                                 tcp_mask->hdr.rx_win ||
1286                                 tcp_mask->hdr.cksum ||
1287                                 tcp_mask->hdr.tcp_urp) {
1288                                 (void)memset(rule, 0,
1289                                         sizeof(struct hinic_fdir_rule));
1290                                 rte_flow_error_set(error, EINVAL,
1291                                         RTE_FLOW_ERROR_TYPE_ITEM,
1292                                         item, "Not supported by fdir filter, support tcp");
1293                                 return -rte_errno;
1294                         }
1295
1296                         rule->mode = HINIC_FDIR_MODE_TCAM;
1297                         rule->mask.tunnel_flag = UINT16_MAX;
1298                         rule->mask.tunnel_inner_src_port_mask =
1299                                                         tcp_mask->hdr.src_port;
1300                         rule->mask.tunnel_inner_dst_port_mask =
1301                                                         tcp_mask->hdr.dst_port;
1302                         rule->mask.proto_mask = UINT16_MAX;
1303
1304                         rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
1305                         if (item->spec) {
1306                                 tcp_spec =
1307                                 (const struct rte_flow_item_tcp *)item->spec;
1308                                 rule->hinic_fdir.tunnel_inner_src_port =
1309                                                         tcp_spec->hdr.src_port;
1310                                 rule->hinic_fdir.tunnel_inner_dst_port =
1311                                                         tcp_spec->hdr.dst_port;
1312                         }
1313                 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1314                         /*
1315                          * Only care about src & dst ports,
1316                          * others should be masked.
1317                          */
1318                         if (!item->mask) {
1319                                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1320                                 rte_flow_error_set(error, EINVAL,
1321                                         RTE_FLOW_ERROR_TYPE_ITEM,
1322                                         item, "Not supported by fdir filter, support src, dst ports");
1323                                 return -rte_errno;
1324                         }
1325
1326                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
1327                         if (udp_mask->hdr.dgram_len ||
1328                             udp_mask->hdr.dgram_cksum) {
1329                                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1330                                 rte_flow_error_set(error, EINVAL,
1331                                         RTE_FLOW_ERROR_TYPE_ITEM,
1332                                         item, "Not supported by fdir filter, support udp");
1333                                 return -rte_errno;
1334                         }
1335
1336                         rule->mode = HINIC_FDIR_MODE_TCAM;
1337                         rule->mask.tunnel_flag = UINT16_MAX;
1338                         rule->mask.tunnel_inner_src_port_mask =
1339                                                         udp_mask->hdr.src_port;
1340                         rule->mask.tunnel_inner_dst_port_mask =
1341                                                         udp_mask->hdr.dst_port;
1342                         rule->mask.proto_mask = UINT16_MAX;
1343
1344                         rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1345                         if (item->spec) {
1346                                 udp_spec =
1347                                 (const struct rte_flow_item_udp *)item->spec;
1348                                 rule->hinic_fdir.tunnel_inner_src_port =
1349                                                         udp_spec->hdr.src_port;
1350                                 rule->hinic_fdir.tunnel_inner_dst_port =
1351                                                         udp_spec->hdr.dst_port;
1352                         }
1353                 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
1354                         rule->mode = HINIC_FDIR_MODE_TCAM;
1355                         rule->mask.tunnel_flag = UINT16_MAX;
1356                 } else {
1357                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
1358                         rte_flow_error_set(error, EINVAL,
1359                                 RTE_FLOW_ERROR_TYPE_ITEM,
1360                                 item, "Not supported by fdir filter, support tcp/udp");
1361                         return -rte_errno;
1362                 }
1363
1364                 /* get next no void item */
1365                 *in_out_item = next_no_void_pattern(pattern, item);
1366         }
1367
1368         return 0;
1369 }
1370
1371 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1372                                         const struct rte_flow_item pattern[],
1373                                         struct hinic_fdir_rule *rule,
1374                                         struct rte_flow_error *error)
1375 {
1376         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1377                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1378                 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1379                 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1380                 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1381                 hinic_normal_item_check_end(item, rule, error))
1382                 return -rte_errno;
1383
1384         return 0;
1385 }
1386
1387 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1388                                         struct hinic_fdir_rule *rule,
1389                                         struct rte_flow_error *error)
1390 {
1391         /* Must be input direction */
1392         if (!attr->ingress) {
1393                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1394                 rte_flow_error_set(error, EINVAL,
1395                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1396                                    attr, "Only support ingress.");
1397                 return -rte_errno;
1398         }
1399
1400         /* Not supported */
1401         if (attr->egress) {
1402                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1403                 rte_flow_error_set(error, EINVAL,
1404                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1405                                    attr, "Not support egress.");
1406                 return -rte_errno;
1407         }
1408
1409         /* Not supported */
1410         if (attr->priority) {
1411                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1412                 rte_flow_error_set(error, EINVAL,
1413                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1414                         attr, "Not support priority.");
1415                 return -rte_errno;
1416         }
1417
1418         return 0;
1419 }
1420
1421 static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
1422                                 const struct rte_flow_action actions[],
1423                                 struct hinic_fdir_rule *rule,
1424                                 struct rte_flow_error *error)
1425 {
1426         const struct rte_flow_action *act;
1427
1428         /* Check if the first not void action is QUEUE */
1429         act = next_no_void_action(actions, NULL);
1430         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1431                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1432                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1433                         item, "Not supported action.");
1434                 return -rte_errno;
1435         }
1436
1437         rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1438
1439         /* Check if the next not void item is END */
1440         act = next_no_void_action(actions, act);
1441         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1442                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1443                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1444                         act, "Not supported action.");
1445                 return -rte_errno;
1446         }
1447
1448         return 0;
1449 }
1450
1451 /**
1452  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1453  * And get the flow director filter info BTW.
1454  * UDP/TCP/SCTP PATTERN:
1455  * The first not void item can be ETH or IPV4 or IPV6
1456  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1457  * The next not void item could be UDP or TCP(optional)
1458  * The next not void item must be END.
1459  * ACTION:
1460  * The first not void action should be QUEUE.
1461  * The second not void optional action should be MARK,
1462  * mark_id is a uint32_t number.
1463  * The next not void action should be END.
1464  * UDP/TCP pattern example:
1465  * ITEM          Spec                                       Mask
1466  * ETH            NULL                                    NULL
1467  * IPV4           src_addr  1.2.3.6                 0xFFFFFFFF
1468  *                   dst_addr  1.2.3.5                 0xFFFFFFFF
1469  * UDP/TCP    src_port  80                         0xFFFF
1470  *                   dst_port  80                         0xFFFF
1471  * END
1472  * Other members in mask and spec should set to 0x00.
1473  * Item->last should be NULL.
1474  */
1475 static int
1476 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1477                                const struct rte_flow_item pattern[],
1478                                const struct rte_flow_action actions[],
1479                                struct hinic_fdir_rule *rule,
1480                                struct rte_flow_error *error)
1481 {
1482         const struct rte_flow_item *item = NULL;
1483
1484         if (hinic_check_filter_arg(attr, pattern, actions, error))
1485                 return -rte_errno;
1486
1487         if (hinic_check_normal_item_ele(item, pattern, rule, error))
1488                 return -rte_errno;
1489
1490         if (hinic_check_normal_attr_ele(attr, rule, error))
1491                 return -rte_errno;
1492
1493         if (hinic_check_normal_act_ele(item, actions, rule, error))
1494                 return -rte_errno;
1495
1496         return 0;
1497 }
1498
1499 /**
1500  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1501  * And get the flow director filter info BTW.
1502  * UDP/TCP/SCTP PATTERN:
1503  * The first not void item can be ETH or IPV4 or IPV6
1504  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1505  * The next not void item can be ANY/TCP/UDP
1506  * ACTION:
1507  * The first not void action should be QUEUE.
1508  * The second not void optional action should be MARK,
1509  * mark_id is a uint32_t number.
1510  * The next not void action should be END.
1511  * UDP/TCP pattern example:
1512  * ITEM                 Spec                           Mask
1513  * ETH            NULL                                 NULL
1514  * IPV4           src_addr  1.2.3.6                 0xFFFFFFFF
1515  *                dst_addr  1.2.3.5                 0xFFFFFFFF
1516  * UDP/TCP        src_port  80                      0xFFFF
1517  *                dst_port  80                      0xFFFF
1518  * END
1519  * Other members in mask and spec should set to 0x00.
1520  * Item->last should be NULL.
1521  */
1522 static int
1523 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1524                                const struct rte_flow_item pattern[],
1525                                const struct rte_flow_action actions[],
1526                                struct hinic_fdir_rule *rule,
1527                                struct rte_flow_error *error)
1528 {
1529         const struct rte_flow_item *item = NULL;
1530
1531         if (hinic_check_filter_arg(attr, pattern, actions, error))
1532                 return -rte_errno;
1533
1534         if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1535                 return -rte_errno;
1536
1537         if (hinic_check_normal_attr_ele(attr, rule, error))
1538                 return -rte_errno;
1539
1540         if (hinic_check_normal_act_ele(item, actions, rule, error))
1541                 return -rte_errno;
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1548  * And get the flow director filter info BTW.
1549  * UDP/TCP/SCTP PATTERN:
1550  * The first not void item can be ETH or IPV4 or IPV6
1551  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1552  * The next not void item must be UDP
1553  * The next not void item must be VXLAN(optional)
1554  * The first not void item can be ETH or IPV4 or IPV6
1555  * The next not void item could be ANY or UDP or TCP(optional)
1556  * The next not void item must be END.
1557  * ACTION:
1558  * The first not void action should be QUEUE.
1559  * The second not void optional action should be MARK,
1560  * mark_id is a uint32_t number.
1561  * The next not void action should be END.
1562  * UDP/TCP pattern example:
1563  * ITEM             Spec                            Mask
1564  * ETH            NULL                              NULL
1565  * IPV4        src_addr  1.2.3.6                 0xFFFFFFFF
1566  *             dst_addr  1.2.3.5                 0xFFFFFFFF
1567  * UDP            NULL                              NULL
1568  * VXLAN          NULL                              NULL
1569  * UDP/TCP     src_port  80                      0xFFFF
1570  *             dst_port  80                      0xFFFF
1571  * END
1572  * Other members in mask and spec should set to 0x00.
1573  * Item->last should be NULL.
1574  */
1575 static int
1576 hinic_parse_fdir_filter_tacm_tunnel(const struct rte_flow_attr *attr,
1577                                const struct rte_flow_item pattern[],
1578                                const struct rte_flow_action actions[],
1579                                struct hinic_fdir_rule *rule,
1580                                struct rte_flow_error *error)
1581 {
1582         const struct rte_flow_item *item = NULL;
1583
1584         if (hinic_check_filter_arg(attr, pattern, actions, error))
1585                 return -rte_errno;
1586
1587         if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1588                 return -rte_errno;
1589
1590         if (hinic_check_normal_attr_ele(attr, rule, error))
1591                 return -rte_errno;
1592
1593         if (hinic_check_normal_act_ele(item, actions, rule, error))
1594                 return -rte_errno;
1595
1596         return 0;
1597 }
1598
1599 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1600                         const struct rte_flow_attr *attr,
1601                         const struct rte_flow_item pattern[],
1602                         const struct rte_flow_action actions[],
1603                         struct hinic_fdir_rule *rule,
1604                         struct rte_flow_error *error)
1605 {
1606         int ret;
1607
1608         ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1609                                                 rule, error);
1610         if (!ret)
1611                 goto step_next;
1612
1613         ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1614                                                 rule, error);
1615         if (!ret)
1616                 goto step_next;
1617
1618         ret = hinic_parse_fdir_filter_tacm_tunnel(attr, pattern, actions,
1619                                                 rule, error);
1620         if (ret)
1621                 return ret;
1622
1623 step_next:
1624         if (rule->queue >= dev->data->nb_rx_queues)
1625                 return -ENOTSUP;
1626
1627         return ret;
1628 }
1629
1630 /**
1631  * Check if the flow rule is supported by nic.
1632  * It only checkes the format. Don't guarantee the rule can be programmed into
1633  * the HW. Because there can be no enough room for the rule.
1634  */
1635 static int hinic_flow_validate(struct rte_eth_dev *dev,
1636                                 const struct rte_flow_attr *attr,
1637                                 const struct rte_flow_item pattern[],
1638                                 const struct rte_flow_action actions[],
1639                                 struct rte_flow_error *error)
1640 {
1641         struct rte_eth_ethertype_filter ethertype_filter;
1642         struct rte_eth_ntuple_filter ntuple_filter;
1643         struct hinic_fdir_rule fdir_rule;
1644         int ret;
1645
1646         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1647         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1648                                 actions, &ntuple_filter, error);
1649         if (!ret)
1650                 return 0;
1651
1652         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1653         ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1654                                 actions, &ethertype_filter, error);
1655
1656         if (!ret)
1657                 return 0;
1658
1659         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1660         ret = hinic_parse_fdir_filter(dev, attr, pattern,
1661                                 actions, &fdir_rule, error);
1662
1663         return ret;
1664 }
1665
1666 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1667                  struct hinic_5tuple_filter_info *hinic_filter_info)
1668 {
1669         switch (filter->dst_ip_mask) {
1670         case UINT32_MAX:
1671                 hinic_filter_info->dst_ip_mask = 0;
1672                 hinic_filter_info->dst_ip = filter->dst_ip;
1673                 break;
1674         case 0:
1675                 hinic_filter_info->dst_ip_mask = 1;
1676                 hinic_filter_info->dst_ip = 0;
1677                 break;
1678         default:
1679                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1680                 return -EINVAL;
1681         }
1682
1683         switch (filter->src_ip_mask) {
1684         case UINT32_MAX:
1685                 hinic_filter_info->src_ip_mask = 0;
1686                 hinic_filter_info->src_ip = filter->src_ip;
1687                 break;
1688         case 0:
1689                 hinic_filter_info->src_ip_mask = 1;
1690                 hinic_filter_info->src_ip = 0;
1691                 break;
1692         default:
1693                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1694                 return -EINVAL;
1695         }
1696         return 0;
1697 }
1698
1699 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1700                    struct hinic_5tuple_filter_info *hinic_filter_info)
1701 {
1702         switch (filter->dst_port_mask) {
1703         case UINT16_MAX:
1704                 hinic_filter_info->dst_port_mask = 0;
1705                 hinic_filter_info->dst_port = filter->dst_port;
1706                 break;
1707         case 0:
1708                 hinic_filter_info->dst_port_mask = 1;
1709                 hinic_filter_info->dst_port = 0;
1710                 break;
1711         default:
1712                 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1713                 return -EINVAL;
1714         }
1715
1716         switch (filter->src_port_mask) {
1717         case UINT16_MAX:
1718                 hinic_filter_info->src_port_mask = 0;
1719                 hinic_filter_info->src_port = filter->src_port;
1720                 break;
1721         case 0:
1722                 hinic_filter_info->src_port_mask = 1;
1723                 hinic_filter_info->src_port = 0;
1724                 break;
1725         default:
1726                 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1727                 return -EINVAL;
1728         }
1729
1730         return 0;
1731 }
1732
1733 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1734                     struct hinic_5tuple_filter_info *hinic_filter_info)
1735 {
1736         switch (filter->proto_mask) {
1737         case UINT8_MAX:
1738                 hinic_filter_info->proto_mask = 0;
1739                 hinic_filter_info->proto = filter->proto;
1740                 break;
1741         case 0:
1742                 hinic_filter_info->proto_mask = 1;
1743                 hinic_filter_info->proto = 0;
1744                 break;
1745         default:
1746                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1747                 return -EINVAL;
1748         }
1749
1750         return 0;
1751 }
1752
1753 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1754                         struct hinic_5tuple_filter_info *filter_info)
1755 {
1756         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1757                 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1758                 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1759                 return -EINVAL;
1760
1761         if (ntuple_ip_filter(filter, filter_info) ||
1762                 ntuple_port_filter(filter, filter_info) ||
1763                 ntuple_proto_filter(filter, filter_info))
1764                 return -EINVAL;
1765
1766         filter_info->priority = (uint8_t)filter->priority;
1767         return 0;
1768 }
1769
1770 static inline struct hinic_5tuple_filter *
1771 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1772                            struct hinic_5tuple_filter_info *key)
1773 {
1774         struct hinic_5tuple_filter *it;
1775
1776         TAILQ_FOREACH(it, filter_list, entries) {
1777                 if (memcmp(key, &it->filter_info,
1778                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1779                         return it;
1780                 }
1781         }
1782
1783         return NULL;
1784 }
1785
1786 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1787 {
1788         struct tag_pa_rule lacp_rule;
1789         struct tag_pa_action lacp_action;
1790
1791         memset(&lacp_rule, 0, sizeof(lacp_rule));
1792         memset(&lacp_action, 0, sizeof(lacp_action));
1793         /* LACP TCAM rule */
1794         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1795         lacp_rule.l2_header.eth_type.val16 = 0x8809;
1796         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1797
1798         /* LACP TCAM action */
1799         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1800         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1801         lacp_action.pkt_type = PKT_LACP_TYPE;
1802         lacp_action.pri = 0x0;
1803         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1804
1805         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1806                                         &lacp_rule, &lacp_action);
1807 }
1808
1809 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1810 {
1811         struct tag_pa_rule bgp_rule;
1812         struct tag_pa_action bgp_action;
1813
1814         memset(&bgp_rule, 0, sizeof(bgp_rule));
1815         memset(&bgp_action, 0, sizeof(bgp_action));
1816         /* BGP TCAM rule */
1817         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1818         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1819         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1820         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1821         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1822         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1823
1824         /* BGP TCAM action */
1825         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1826         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1827         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1828         bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1829                                * results, not need to convert
1830                                */
1831         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1832
1833         return hinic_set_fdir_tcam(nic_dev->hwdev,
1834                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1835 }
1836
1837 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1838 {
1839         struct tag_pa_rule bgp_rule;
1840         struct tag_pa_action bgp_action;
1841
1842         memset(&bgp_rule, 0, sizeof(bgp_rule));
1843         memset(&bgp_action, 0, sizeof(bgp_action));
1844         /* BGP TCAM rule */
1845         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1846         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1847         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1848         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1849         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1850         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1851
1852         /* BGP TCAM action */
1853         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1854         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1855         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1856         bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1857                                * results, not need to convert
1858                                */
1859         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1860
1861         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1862                                         &bgp_rule, &bgp_action);
1863 }
1864
1865 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1866 {
1867         struct tag_pa_rule vrrp_rule;
1868         struct tag_pa_action vrrp_action;
1869
1870         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1871         memset(&vrrp_action, 0, sizeof(vrrp_action));
1872         /* VRRP TCAM rule */
1873         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1874         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1875         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1876         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1877
1878         /* VRRP TCAM action */
1879         vrrp_action.err_type = 0x3f;
1880         vrrp_action.fwd_action = 0x7;
1881         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1882         vrrp_action.pri = 0xf;
1883         vrrp_action.push_len = 0xf;
1884
1885         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1886                                         &vrrp_rule, &vrrp_action);
1887 }
1888
1889 /**
1890  *  Clear all fdir configuration.
1891  *
1892  * @param nic_dev
1893  *   The hardware interface of a Ethernet device.
1894  *
1895  * @return
1896  *   0 on success,
1897  *   negative error value otherwise.
1898  */
1899 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1900 {
1901         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1902
1903         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1904
1905         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1906
1907         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1908
1909         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1910
1911         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
1912 }
1913
1914 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1915                        struct hinic_filter_info *filter_info)
1916 {
1917         switch (filter->filter_info.proto) {
1918         case IPPROTO_TCP:
1919                 /* Filter type is bgp type if dst_port or src_port is 179 */
1920                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1921                         !(filter->filter_info.dst_port_mask)) {
1922                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1923                 } else if (filter->filter_info.src_port ==
1924                         RTE_BE16(BGP_DPORT_ID) &&
1925                         !(filter->filter_info.src_port_mask)) {
1926                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1927                 } else {
1928                         PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
1929                         " just support BGP now, proto:0x%x, "
1930                         "dst_port:0x%x, dst_port_mask:0x%x."
1931                         "src_port:0x%x, src_port_mask:0x%x.",
1932                         filter->filter_info.proto,
1933                         filter->filter_info.dst_port,
1934                         filter->filter_info.dst_port_mask,
1935                         filter->filter_info.src_port,
1936                         filter->filter_info.src_port_mask);
1937                         return -EINVAL;
1938                 }
1939                 break;
1940
1941         case IPPROTO_VRRP:
1942                 filter_info->pkt_type = PKT_VRRP_TYPE;
1943                 break;
1944
1945         case IPPROTO_ICMP:
1946                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1947                 break;
1948
1949         case IPPROTO_ICMPV6:
1950                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1951                 break;
1952
1953         default:
1954                 PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
1955                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1956                 "src_port: 0x%x, src_port_mask: 0x%x.",
1957                 filter->filter_info.proto, filter->filter_info.dst_port,
1958                 filter->filter_info.dst_port_mask,
1959                 filter->filter_info.src_port,
1960                 filter->filter_info.src_port_mask);
1961                 return -EINVAL;
1962         }
1963
1964         return 0;
1965 }
1966
1967 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1968                         struct hinic_filter_info *filter_info, int *index)
1969 {
1970         int type_id;
1971
1972         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1973
1974         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1975                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
1976                 return -EINVAL;
1977         }
1978
1979         if (!(filter_info->type_mask & (1 << type_id))) {
1980                 filter_info->type_mask |= 1 << type_id;
1981                 filter->index = type_id;
1982                 filter_info->pkt_filters[type_id].enable = true;
1983                 filter_info->pkt_filters[type_id].pkt_proto =
1984                                                 filter->filter_info.proto;
1985                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1986                                   filter, entries);
1987         } else {
1988                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1989                 return -EIO;
1990         }
1991
1992         *index = type_id;
1993         return 0;
1994 }
1995
1996 /*
1997  * Add a 5tuple filter
1998  *
1999  * @param dev:
2000  *  Pointer to struct rte_eth_dev.
2001  * @param filter:
2002  *  Pointer to the filter that will be added.
2003  * @return
2004  *    - On success, zero.
2005  *    - On failure, a negative value.
2006  */
2007 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
2008                                 struct hinic_5tuple_filter *filter)
2009 {
2010         struct hinic_filter_info *filter_info =
2011                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2012         int i, ret_fw;
2013         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2014
2015         if (hinic_filter_info_init(filter, filter_info) ||
2016                 hinic_lookup_new_filter(filter, filter_info, &i))
2017                 return -EFAULT;
2018
2019         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2020                                         filter_info->qid,
2021                                         filter_info->pkt_filters[i].enable,
2022                                         true);
2023         if (ret_fw) {
2024                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2025                         filter_info->pkt_type, filter->queue,
2026                         filter_info->pkt_filters[i].enable);
2027                 return -EFAULT;
2028         }
2029
2030         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2031                         filter_info->pkt_type, filter_info->qid,
2032                         filter_info->pkt_filters[filter->index].enable);
2033
2034         switch (filter->filter_info.proto) {
2035         case IPPROTO_TCP:
2036                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
2037                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
2038                         if (ret_fw) {
2039                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
2040                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
2041                                         filter_info->pkt_type, filter->queue,
2042                                         filter_info->pkt_filters[i].enable);
2043                                 return -EFAULT;
2044                         }
2045
2046                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
2047                                 filter->queue,
2048                                 filter_info->pkt_filters[i].enable);
2049                 } else if (filter->filter_info.src_port ==
2050                         RTE_BE16(BGP_DPORT_ID)) {
2051                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
2052                         if (ret_fw) {
2053                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
2054                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
2055                                         filter_info->pkt_type, filter->queue,
2056                                         filter_info->pkt_filters[i].enable);
2057                                 return -EFAULT;
2058                         }
2059
2060                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
2061                                         filter->queue,
2062                                         filter_info->pkt_filters[i].enable);
2063                 }
2064
2065                 break;
2066
2067         case IPPROTO_VRRP:
2068                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
2069                 if (ret_fw) {
2070                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
2071                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
2072                                 filter_info->pkt_type, filter->queue,
2073                                 filter_info->pkt_filters[i].enable);
2074                         return -EFAULT;
2075                 }
2076                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
2077                                 filter->queue,
2078                                 filter_info->pkt_filters[i].enable);
2079                 break;
2080
2081         default:
2082                 break;
2083         }
2084
2085         return 0;
2086 }
2087
2088 /*
2089  * Remove a 5tuple filter
2090  *
2091  * @param dev
2092  *  Pointer to struct rte_eth_dev.
2093  * @param filter
2094  *  The pointer of the filter will be removed.
2095  */
2096 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2097                            struct hinic_5tuple_filter *filter)
2098 {
2099         struct hinic_filter_info *filter_info =
2100                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2101         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2102
2103         switch (filter->filter_info.proto) {
2104         case IPPROTO_VRRP:
2105                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2106                 break;
2107
2108         case IPPROTO_TCP:
2109                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2110                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2111                                                         TCAM_PKT_BGP_DPORT);
2112                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2113                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2114                                                         TCAM_PKT_BGP_SPORT);
2115                 break;
2116
2117         default:
2118                 break;
2119         }
2120
2121         hinic_filter_info_init(filter, filter_info);
2122
2123         filter_info->pkt_filters[filter->index].enable = false;
2124         filter_info->pkt_filters[filter->index].pkt_proto = 0;
2125
2126         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2127                 filter_info->pkt_type,
2128                 filter_info->pkt_filters[filter->index].qid,
2129                 filter_info->pkt_filters[filter->index].enable);
2130         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2131                                 filter_info->pkt_filters[filter->index].qid,
2132                                 filter_info->pkt_filters[filter->index].enable,
2133                                 true);
2134
2135         filter_info->pkt_type = 0;
2136         filter_info->qid = 0;
2137         filter_info->pkt_filters[filter->index].qid = 0;
2138         filter_info->type_mask &= ~(1 <<  (filter->index));
2139         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2140
2141         rte_free(filter);
2142 }
2143
2144 /*
2145  * Add or delete a ntuple filter
2146  *
2147  * @param dev
2148  *  Pointer to struct rte_eth_dev.
2149  * @param ntuple_filter
2150  *  Pointer to struct rte_eth_ntuple_filter
2151  * @param add
2152  *  If true, add filter; if false, remove filter
2153  * @return
2154  *    - On success, zero.
2155  *    - On failure, a negative value.
2156  */
2157 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2158                                 struct rte_eth_ntuple_filter *ntuple_filter,
2159                                 bool add)
2160 {
2161         struct hinic_filter_info *filter_info =
2162                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2163         struct hinic_5tuple_filter_info filter_5tuple;
2164         struct hinic_5tuple_filter *filter;
2165         int ret;
2166
2167         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2168                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2169                 return -EINVAL;
2170         }
2171
2172         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2173         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2174         if (ret < 0)
2175                 return ret;
2176
2177         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2178                                          &filter_5tuple);
2179         if (filter != NULL && add) {
2180                 PMD_DRV_LOG(ERR, "Filter exists.");
2181                 return -EEXIST;
2182         }
2183         if (filter == NULL && !add) {
2184                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2185                 return -ENOENT;
2186         }
2187
2188         if (add) {
2189                 filter = rte_zmalloc("hinic_5tuple_filter",
2190                                 sizeof(struct hinic_5tuple_filter), 0);
2191                 if (filter == NULL)
2192                         return -ENOMEM;
2193                 rte_memcpy(&filter->filter_info, &filter_5tuple,
2194                                 sizeof(struct hinic_5tuple_filter_info));
2195                 filter->queue = ntuple_filter->queue;
2196
2197                 filter_info->qid = ntuple_filter->queue;
2198
2199                 ret = hinic_add_5tuple_filter(dev, filter);
2200                 if (ret)
2201                         rte_free(filter);
2202
2203                 return ret;
2204         }
2205
2206         hinic_remove_5tuple_filter(dev, filter);
2207
2208         return 0;
2209 }
2210
2211 static inline int
2212 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2213 {
2214         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2215                 return -EINVAL;
2216
2217         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2218                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2219                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2220                         " ethertype filter", filter->ether_type);
2221                 return -EINVAL;
2222         }
2223
2224         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2225                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2226                 return -EINVAL;
2227         }
2228         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2229                 PMD_DRV_LOG(ERR, "Drop option is not supported");
2230                 return -EINVAL;
2231         }
2232
2233         return 0;
2234 }
2235
2236 static inline int
2237 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2238                               struct hinic_pkt_filter *ethertype_filter)
2239 {
2240         switch (ethertype_filter->pkt_proto) {
2241         case RTE_ETHER_TYPE_SLOW:
2242                 filter_info->pkt_type = PKT_LACP_TYPE;
2243                 break;
2244
2245         case RTE_ETHER_TYPE_ARP:
2246                 filter_info->pkt_type = PKT_ARP_TYPE;
2247                 break;
2248
2249         default:
2250                 PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
2251                 return -EIO;
2252         }
2253
2254         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2255 }
2256
2257 static inline int
2258 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2259                               struct hinic_pkt_filter *ethertype_filter)
2260 {
2261         int id;
2262
2263         /* Find LACP or VRRP type id */
2264         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2265         if (id < 0)
2266                 return -EINVAL;
2267
2268         if (!(filter_info->type_mask & (1 << id))) {
2269                 filter_info->type_mask |= 1 << id;
2270                 filter_info->pkt_filters[id].pkt_proto =
2271                         ethertype_filter->pkt_proto;
2272                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2273                 filter_info->qid = ethertype_filter->qid;
2274                 return id;
2275         }
2276
2277         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2278         return -EINVAL;
2279 }
2280
2281 static inline void
2282 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2283                               uint8_t idx)
2284 {
2285         if (idx >= HINIC_MAX_Q_FILTERS)
2286                 return;
2287
2288         filter_info->pkt_type = 0;
2289         filter_info->type_mask &= ~(1 << idx);
2290         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2291         filter_info->pkt_filters[idx].enable = FALSE;
2292         filter_info->pkt_filters[idx].qid = 0;
2293 }
2294
2295 static inline int
2296 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2297                                struct rte_eth_ethertype_filter *filter,
2298                                bool add)
2299 {
2300         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2301         struct hinic_filter_info *filter_info =
2302                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2303         struct hinic_pkt_filter ethertype_filter;
2304         int i;
2305         int ret_fw;
2306
2307         if (hinic_check_ethertype_filter(filter))
2308                 return -EINVAL;
2309
2310         if (add) {
2311                 ethertype_filter.pkt_proto = filter->ether_type;
2312                 ethertype_filter.enable = TRUE;
2313                 ethertype_filter.qid = (u8)filter->queue;
2314                 i = hinic_ethertype_filter_insert(filter_info,
2315                                                     &ethertype_filter);
2316                 if (i < 0)
2317                         return -ENOSPC;
2318
2319                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2320                                 filter_info->pkt_type, filter_info->qid,
2321                                 filter_info->pkt_filters[i].enable, true);
2322                 if (ret_fw) {
2323                         PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2324                                 filter_info->pkt_type, filter->queue,
2325                                 filter_info->pkt_filters[i].enable);
2326
2327                         hinic_ethertype_filter_remove(filter_info, i);
2328                         return -ENOENT;
2329                 }
2330                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2331                                 filter_info->pkt_type, filter->queue,
2332                                 filter_info->pkt_filters[i].enable);
2333
2334                 switch (ethertype_filter.pkt_proto) {
2335                 case RTE_ETHER_TYPE_SLOW:
2336                         ret_fw = hinic_set_lacp_tcam(nic_dev);
2337                         if (ret_fw) {
2338                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2339                                 hinic_ethertype_filter_remove(filter_info, i);
2340                                 return -ENOENT;
2341                         }
2342
2343                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2344                         break;
2345                 default:
2346                         break;
2347                 }
2348         } else {
2349                 ethertype_filter.pkt_proto = filter->ether_type;
2350                 i = hinic_ethertype_filter_lookup(filter_info,
2351                                                 &ethertype_filter);
2352
2353                 if ((filter_info->type_mask & (1 << i))) {
2354                         filter_info->pkt_filters[i].enable = FALSE;
2355                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
2356                                         filter_info->pkt_type,
2357                                         filter_info->pkt_filters[i].qid,
2358                                         filter_info->pkt_filters[i].enable,
2359                                         true);
2360
2361                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2362                                         filter_info->pkt_type,
2363                                         filter_info->pkt_filters[i].qid,
2364                                         filter_info->pkt_filters[i].enable);
2365
2366                         switch (ethertype_filter.pkt_proto) {
2367                         case RTE_ETHER_TYPE_SLOW:
2368                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2369                                                                 TCAM_PKT_LACP);
2370                                 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2371                                 break;
2372                         default:
2373                                 break;
2374                         }
2375
2376                         hinic_ethertype_filter_remove(filter_info, i);
2377
2378                 } else {
2379                         PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2380                                         filter_info->pkt_type, filter->queue,
2381                                         filter_info->pkt_filters[i].enable);
2382                         return -ENOENT;
2383                 }
2384         }
2385
2386         return 0;
2387 }
2388
2389 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2390                                 struct hinic_fdir_info *fdir_info)
2391 {
2392         switch (rule->mask.src_ipv4_mask) {
2393         case UINT32_MAX:
2394                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2395                 fdir_info->qid = rule->queue;
2396                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2397                 return 0;
2398
2399         case 0:
2400                 break;
2401
2402         default:
2403                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2404                 return -EINVAL;
2405         }
2406
2407         switch (rule->mask.dst_ipv4_mask) {
2408         case UINT32_MAX:
2409                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2410                 fdir_info->qid = rule->queue;
2411                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2412                 return 0;
2413
2414         case 0:
2415                 break;
2416
2417         default:
2418                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2419                 return -EINVAL;
2420         }
2421
2422         if (fdir_info->fdir_flag == 0) {
2423                 PMD_DRV_LOG(ERR, "All support mask is NULL.");
2424                 return -EINVAL;
2425         }
2426
2427         return 0;
2428 }
2429
2430 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2431                                         struct hinic_fdir_rule *rule, bool add)
2432 {
2433         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2434         struct hinic_fdir_info fdir_info;
2435         int ret;
2436
2437         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2438
2439         ret = hinic_fdir_info_init(rule, &fdir_info);
2440         if (ret) {
2441                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2442                 return ret;
2443         }
2444
2445         if (add) {
2446                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2447                                                 true, fdir_info.fdir_key,
2448                                                 true, fdir_info.fdir_flag);
2449                 if (ret) {
2450                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2451                                         fdir_info.fdir_flag, fdir_info.qid,
2452                                         fdir_info.fdir_key);
2453                         return -ENOENT;
2454                 }
2455                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2456                                 fdir_info.fdir_flag, fdir_info.qid,
2457                                 fdir_info.fdir_key);
2458         } else {
2459                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2460                                                 false, fdir_info.fdir_key, true,
2461                                                 fdir_info.fdir_flag);
2462                 if (ret) {
2463                         PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2464                                 fdir_info.fdir_flag, fdir_info.qid,
2465                                 fdir_info.fdir_key);
2466                         return -ENOENT;
2467                 }
2468                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2469                                 fdir_info.fdir_flag, fdir_info.qid,
2470                                 fdir_info.fdir_key);
2471         }
2472
2473         return 0;
2474 }
2475
2476 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2477 {
2478         u8 idx;
2479
2480         for (idx = 0; idx < len; idx++)
2481                 key_y[idx] = src_input[idx] & mask[idx];
2482 }
2483
2484 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2485 {
2486         u8 idx;
2487
2488         for (idx = 0; idx < len; idx++)
2489                 key_x[idx] = key_y[idx] ^ mask[idx];
2490 }
2491
2492 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2493                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2494 {
2495         tcam_translate_key_y(fdir_tcam_rule->key.y,
2496                 (u8 *)(&tcam_key->key_info),
2497                 (u8 *)(&tcam_key->key_mask),
2498                 TCAM_FLOW_KEY_SIZE);
2499         tcam_translate_key_x(fdir_tcam_rule->key.x,
2500                 fdir_tcam_rule->key.y,
2501                 (u8 *)(&tcam_key->key_mask),
2502                 TCAM_FLOW_KEY_SIZE);
2503 }
2504
2505 static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
2506                                      struct hinic_fdir_rule *rule,
2507                                      struct tag_tcam_key *tcam_key)
2508 {
2509         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2510
2511         switch (rule->mask.dst_ipv4_mask) {
2512         case UINT32_MAX:
2513                 tcam_key->key_info.ext_dip_h =
2514                         (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2515                 tcam_key->key_info.ext_dip_l =
2516                         rule->hinic_fdir.dst_ip & 0xffffU;
2517                 tcam_key->key_mask.ext_dip_h =
2518                         (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2519                 tcam_key->key_mask.ext_dip_l =
2520                         rule->mask.dst_ipv4_mask & 0xffffU;
2521                 break;
2522
2523         case 0:
2524                 break;
2525
2526         default:
2527                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2528                 return -EINVAL;
2529         }
2530
2531         if (rule->mask.dst_port_mask > 0) {
2532                 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2533                 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2534         }
2535
2536         if (rule->mask.src_port_mask > 0) {
2537                 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2538                 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2539         }
2540
2541         switch (rule->mask.tunnel_flag) {
2542         case UINT16_MAX:
2543                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2544                 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2545                 break;
2546
2547         case 0:
2548                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2549                 tcam_key->key_mask.tunnel_flag = 0;
2550                 break;
2551
2552         default:
2553                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2554                 return -EINVAL;
2555         }
2556
2557         if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2558                 tcam_key->key_info.dst_port =
2559                                         rule->hinic_fdir.tunnel_inner_dst_port;
2560                 tcam_key->key_mask.dst_port =
2561                                         rule->mask.tunnel_inner_dst_port_mask;
2562         }
2563
2564         if (rule->mask.tunnel_inner_src_port_mask > 0) {
2565                 tcam_key->key_info.src_port =
2566                                         rule->hinic_fdir.tunnel_inner_src_port;
2567                 tcam_key->key_mask.src_port =
2568                                         rule->mask.tunnel_inner_src_port_mask;
2569         }
2570
2571         switch (rule->mask.proto_mask) {
2572         case UINT16_MAX:
2573                 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2574                 tcam_key->key_mask.protocol = UINT8_MAX;
2575                 break;
2576
2577         case 0:
2578                 break;
2579
2580         default:
2581                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2582                 return -EINVAL;
2583         }
2584
2585         tcam_key->key_mask.function_id = UINT16_MAX;
2586         tcam_key->key_info.function_id =
2587                 hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
2588
2589         return 0;
2590 }
2591
2592 static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
2593                                      struct hinic_fdir_rule *rule,
2594                                      struct tag_tcam_key *tcam_key)
2595 {
2596         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2597
2598         switch (rule->mask.dst_ipv6_mask) {
2599         case UINT16_MAX:
2600                 tcam_key->key_info_ipv6.ipv6_key0 =
2601                         ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
2602                         rule->hinic_fdir.dst_ipv6[1];
2603                 tcam_key->key_info_ipv6.ipv6_key1 =
2604                         ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
2605                         rule->hinic_fdir.dst_ipv6[3];
2606                 tcam_key->key_info_ipv6.ipv6_key2 =
2607                         ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
2608                         rule->hinic_fdir.dst_ipv6[5];
2609                 tcam_key->key_info_ipv6.ipv6_key3 =
2610                         ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
2611                         rule->hinic_fdir.dst_ipv6[7];
2612                 tcam_key->key_info_ipv6.ipv6_key4 =
2613                         ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
2614                         rule->hinic_fdir.dst_ipv6[9];
2615                 tcam_key->key_info_ipv6.ipv6_key5 =
2616                         ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
2617                         rule->hinic_fdir.dst_ipv6[11];
2618                 tcam_key->key_info_ipv6.ipv6_key6 =
2619                         ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
2620                         rule->hinic_fdir.dst_ipv6[13];
2621                 tcam_key->key_info_ipv6.ipv6_key7 =
2622                         ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
2623                         rule->hinic_fdir.dst_ipv6[15];
2624                 tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
2625                 tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
2626                 tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
2627                 tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
2628                 tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
2629                 tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
2630                 tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
2631                 tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
2632                 break;
2633
2634         case 0:
2635                 break;
2636
2637         default:
2638                 PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
2639                 return -EINVAL;
2640         }
2641
2642         if (rule->mask.dst_port_mask > 0) {
2643                 tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
2644                 tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
2645         }
2646
2647         switch (rule->mask.proto_mask) {
2648         case UINT16_MAX:
2649                 tcam_key->key_info_ipv6.protocol =
2650                         (rule->hinic_fdir.proto) & 0x7F;
2651                 tcam_key->key_mask_ipv6.protocol = 0x7F;
2652                 break;
2653
2654         case 0:
2655                 break;
2656
2657         default:
2658                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask");
2659                 return -EINVAL;
2660         }
2661
2662         tcam_key->key_info_ipv6.ipv6_flag = 1;
2663         tcam_key->key_mask_ipv6.ipv6_flag = 1;
2664
2665         tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
2666         tcam_key->key_info_ipv6.function_id =
2667                         (u8)hinic_global_func_id(nic_dev->hwdev);
2668
2669         return 0;
2670 }
2671
2672 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2673                                      struct hinic_fdir_rule *rule,
2674                                      struct tag_tcam_key *tcam_key,
2675                                      struct tag_tcam_cfg_rule *fdir_tcam_rule)
2676 {
2677         int ret = -1;
2678
2679         if (rule->mask.dst_ipv4_mask == UINT32_MAX)
2680                 ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
2681         else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
2682                 ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
2683
2684         if (ret < 0)
2685                 return ret;
2686
2687         fdir_tcam_rule->data.qid = rule->queue;
2688
2689         tcam_key_calculate(tcam_key, fdir_tcam_rule);
2690
2691         return 0;
2692 }
2693
2694 static inline struct hinic_tcam_filter *
2695 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2696                         struct tag_tcam_key *key)
2697 {
2698         struct hinic_tcam_filter *it;
2699
2700         TAILQ_FOREACH(it, filter_list, entries) {
2701                 if (memcmp(key, &it->tcam_key,
2702                         sizeof(struct tag_tcam_key)) == 0) {
2703                         return it;
2704                 }
2705         }
2706
2707         return NULL;
2708 }
2709
2710 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2711                                         struct hinic_tcam_info *tcam_info,
2712                                         struct hinic_tcam_filter *tcam_filter,
2713                                         u16 *tcam_index)
2714 {
2715         int index;
2716         int max_index;
2717         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2718
2719         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2720                 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2721         else
2722                 max_index = HINIC_PF_MAX_TCAM_FILTERS;
2723
2724         for (index = 0; index < max_index; index++) {
2725                 if (tcam_info->tcam_index_array[index] == 0)
2726                         break;
2727         }
2728
2729         if (index == max_index) {
2730                 PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
2731                         hinic_global_func_id(nic_dev->hwdev), max_index);
2732                 return -EINVAL;
2733         }
2734
2735         tcam_filter->index = index;
2736         *tcam_index = index;
2737
2738         return 0;
2739 }
2740
2741 static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
2742                                 struct hinic_tcam_filter *tcam_filter,
2743                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2744 {
2745         struct hinic_tcam_info *tcam_info =
2746                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2747         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2748         u16 index = 0;
2749         u16 tcam_block_index = 0;
2750         int rc;
2751
2752         if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
2753                 return -EINVAL;
2754
2755         if (tcam_info->tcam_rule_nums == 0) {
2756                 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2757                         rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2758                                 HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
2759                         if (rc != 0) {
2760                                 PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
2761                                 return -EFAULT;
2762                         }
2763                 } else {
2764                         rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2765                                 HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
2766                         if (rc != 0) {
2767                                 PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
2768                                 return -EFAULT;
2769                         }
2770                 }
2771
2772                 tcam_info->tcam_block_index = tcam_block_index;
2773         } else {
2774                 tcam_block_index = tcam_info->tcam_block_index;
2775         }
2776
2777         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2778                 fdir_tcam_rule->index =
2779                         HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
2780         } else {
2781                 fdir_tcam_rule->index =
2782                         tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
2783         }
2784
2785         rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
2786         if (rc != 0) {
2787                 PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
2788                 return -EFAULT;
2789         }
2790
2791         PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x,"
2792                 "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
2793                 hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
2794                 fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
2795                 tcam_info->tcam_rule_nums + 1);
2796
2797         if (tcam_info->tcam_rule_nums == 0) {
2798                 rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
2799                 if (rc < 0) {
2800                         (void)hinic_del_tcam_rule(nic_dev->hwdev,
2801                                                 fdir_tcam_rule->index);
2802                         return rc;
2803                 }
2804         }
2805
2806         TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
2807
2808         tcam_info->tcam_index_array[index] = 1;
2809         tcam_info->tcam_rule_nums++;
2810
2811         return 0;
2812 }
2813
2814 static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
2815                                 struct hinic_tcam_filter *tcam_filter)
2816 {
2817         struct hinic_tcam_info *tcam_info =
2818                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2819         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2820         u32 index = 0;
2821         u16 tcam_block_index = tcam_info->tcam_block_index;
2822         int rc;
2823         u8 block_type = 0;
2824
2825         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2826                 index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
2827                         tcam_filter->index;
2828                 block_type = HINIC_TCAM_BLOCK_TYPE_VF;
2829         } else {
2830                 index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
2831                         tcam_filter->index;
2832                 block_type = HINIC_TCAM_BLOCK_TYPE_PF;
2833         }
2834
2835         rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
2836         if (rc != 0) {
2837                 PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
2838                 return -EFAULT;
2839         }
2840
2841         PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
2842                 "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
2843                 hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
2844                 tcam_info->tcam_rule_nums - 1);
2845
2846         TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
2847
2848         tcam_info->tcam_index_array[tcam_filter->index] = 0;
2849
2850         rte_free(tcam_filter);
2851
2852         tcam_info->tcam_rule_nums--;
2853
2854         if (tcam_info->tcam_rule_nums == 0) {
2855                 (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
2856                                         &tcam_block_index);
2857         }
2858
2859         return 0;
2860 }
2861
2862 static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
2863                                         struct hinic_fdir_rule *rule, bool add)
2864 {
2865         struct hinic_tcam_info *tcam_info =
2866                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2867         struct hinic_tcam_filter *tcam_filter;
2868         struct tag_tcam_cfg_rule fdir_tcam_rule;
2869         struct tag_tcam_key tcam_key;
2870         int ret;
2871
2872         memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
2873         memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
2874
2875         ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
2876         if (ret) {
2877                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2878                 return ret;
2879         }
2880
2881         tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
2882                                                 &tcam_key);
2883         if (tcam_filter != NULL && add) {
2884                 PMD_DRV_LOG(ERR, "Filter exists.");
2885                 return -EEXIST;
2886         }
2887         if (tcam_filter == NULL && !add) {
2888                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2889                 return -ENOENT;
2890         }
2891
2892         if (add) {
2893                 tcam_filter = rte_zmalloc("hinic_5tuple_filter",
2894                                 sizeof(struct hinic_tcam_filter), 0);
2895                 if (tcam_filter == NULL)
2896                         return -ENOMEM;
2897                 (void)rte_memcpy(&tcam_filter->tcam_key,
2898                                  &tcam_key, sizeof(struct tag_tcam_key));
2899                 tcam_filter->queue = fdir_tcam_rule.data.qid;
2900
2901                 ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
2902                 if (ret < 0) {
2903                         rte_free(tcam_filter);
2904                         return ret;
2905                 }
2906
2907                 rule->tcam_index = fdir_tcam_rule.index;
2908
2909         } else {
2910                 PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter");
2911                 ret = hinic_del_tcam_filter(dev, tcam_filter);
2912                 if (ret < 0)
2913                         return ret;
2914         }
2915
2916         return 0;
2917 }
2918
2919 /**
2920  * Create or destroy a flow rule.
2921  * Theorically one rule can match more than one filters.
2922  * We will let it use the filter which it hitt first.
2923  * So, the sequence matters.
2924  */
2925 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2926                                         const struct rte_flow_attr *attr,
2927                                         const struct rte_flow_item pattern[],
2928                                         const struct rte_flow_action actions[],
2929                                         struct rte_flow_error *error)
2930 {
2931         int ret;
2932         struct rte_eth_ntuple_filter ntuple_filter;
2933         struct rte_eth_ethertype_filter ethertype_filter;
2934         struct hinic_fdir_rule fdir_rule;
2935         struct rte_flow *flow = NULL;
2936         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2937         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2938         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2939         struct hinic_flow_mem *hinic_flow_mem_ptr;
2940         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2941
2942         flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2943         if (!flow) {
2944                 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2945                 return NULL;
2946         }
2947
2948         hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2949                         sizeof(struct hinic_flow_mem), 0);
2950         if (!hinic_flow_mem_ptr) {
2951                 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2952                 rte_free(flow);
2953                 return NULL;
2954         }
2955
2956         hinic_flow_mem_ptr->flow = flow;
2957         TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2958                                 entries);
2959
2960         /* Add ntuple filter */
2961         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2962         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2963                         actions, &ntuple_filter, error);
2964         if (!ret) {
2965                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2966                 if (!ret) {
2967                         ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2968                                 sizeof(struct hinic_ntuple_filter_ele), 0);
2969                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2970                                    &ntuple_filter,
2971                                    sizeof(struct rte_eth_ntuple_filter));
2972                         TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2973                         ntuple_filter_ptr, entries);
2974                         flow->rule = ntuple_filter_ptr;
2975                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2976
2977                         PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
2978                         hinic_global_func_id(nic_dev->hwdev));
2979                         return flow;
2980                 }
2981                 goto out;
2982         }
2983
2984         /* Add ethertype filter */
2985         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2986         ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
2987                                         &ethertype_filter, error);
2988         if (!ret) {
2989                 ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
2990                                                      TRUE);
2991                 if (!ret) {
2992                         ethertype_filter_ptr =
2993                                 rte_zmalloc("hinic_ethertype_filter",
2994                                 sizeof(struct hinic_ethertype_filter_ele), 0);
2995                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2996                                 &ethertype_filter,
2997                                 sizeof(struct rte_eth_ethertype_filter));
2998                         TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
2999                                 ethertype_filter_ptr, entries);
3000                         flow->rule = ethertype_filter_ptr;
3001                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3002
3003                         PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
3004                                         hinic_global_func_id(nic_dev->hwdev));
3005                         return flow;
3006                 }
3007                 goto out;
3008         }
3009
3010         /* Add fdir filter */
3011         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
3012         ret = hinic_parse_fdir_filter(dev, attr, pattern,
3013                                       actions, &fdir_rule, error);
3014         if (!ret) {
3015                 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3016                         ret = hinic_add_del_fdir_filter(dev,
3017                                         &fdir_rule, TRUE);
3018                 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3019                         ret = hinic_add_del_tcam_fdir_filter(dev,
3020                                         &fdir_rule, TRUE);
3021                 }  else {
3022                         PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong");
3023                         goto out;
3024                 }
3025                 if (!ret) {
3026                         fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
3027                                 sizeof(struct hinic_fdir_rule_ele), 0);
3028                         rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
3029                                 sizeof(struct hinic_fdir_rule));
3030                         TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
3031                                 fdir_rule_ptr, entries);
3032                         flow->rule = fdir_rule_ptr;
3033                         flow->filter_type = RTE_ETH_FILTER_FDIR;
3034
3035                         PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x",
3036                                         hinic_global_func_id(nic_dev->hwdev));
3037                         return flow;
3038                 }
3039                 goto out;
3040         }
3041
3042 out:
3043         TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
3044         rte_flow_error_set(error, -ret,
3045                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3046                            "Failed to create flow.");
3047         rte_free(hinic_flow_mem_ptr);
3048         rte_free(flow);
3049         return NULL;
3050 }
3051
3052 /* Destroy a flow rule on hinic. */
3053 static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
3054                                 struct rte_flow_error *error)
3055 {
3056         int ret;
3057         struct rte_flow *pmd_flow = flow;
3058         enum rte_filter_type filter_type = pmd_flow->filter_type;
3059         struct rte_eth_ntuple_filter ntuple_filter;
3060         struct rte_eth_ethertype_filter ethertype_filter;
3061         struct hinic_fdir_rule fdir_rule;
3062         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3063         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3064         struct hinic_fdir_rule_ele *fdir_rule_ptr;
3065         struct hinic_flow_mem *hinic_flow_mem_ptr;
3066         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3067
3068         switch (filter_type) {
3069         case RTE_ETH_FILTER_NTUPLE:
3070                 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
3071                                         pmd_flow->rule;
3072                 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
3073                         sizeof(struct rte_eth_ntuple_filter));
3074                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3075                 if (!ret) {
3076                         TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
3077                                 ntuple_filter_ptr, entries);
3078                         rte_free(ntuple_filter_ptr);
3079                 }
3080                 break;
3081         case RTE_ETH_FILTER_ETHERTYPE:
3082                 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
3083                                         pmd_flow->rule;
3084                 rte_memcpy(&ethertype_filter,
3085                         &ethertype_filter_ptr->filter_info,
3086                         sizeof(struct rte_eth_ethertype_filter));
3087                 ret = hinic_add_del_ethertype_filter(dev,
3088                                 &ethertype_filter, FALSE);
3089                 if (!ret) {
3090                         TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3091                                 ethertype_filter_ptr, entries);
3092                         rte_free(ethertype_filter_ptr);
3093                 }
3094                 break;
3095         case RTE_ETH_FILTER_FDIR:
3096                 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
3097                 rte_memcpy(&fdir_rule,
3098                         &fdir_rule_ptr->filter_info,
3099                         sizeof(struct hinic_fdir_rule));
3100                 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3101                         ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
3102                 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3103                         ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
3104                                                                 FALSE);
3105                 } else {
3106                         PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!");
3107                         ret = -EINVAL;
3108                 }
3109                 if (!ret) {
3110                         TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
3111                                 fdir_rule_ptr, entries);
3112                         rte_free(fdir_rule_ptr);
3113                 }
3114                 break;
3115         default:
3116                 PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
3117                         filter_type);
3118                 ret = -EINVAL;
3119                 break;
3120         }
3121
3122         if (ret) {
3123                 rte_flow_error_set(error, EINVAL,
3124                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3125                                 NULL, "Failed to destroy flow");
3126                 return ret;
3127         }
3128
3129         TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
3130                 if (hinic_flow_mem_ptr->flow == pmd_flow) {
3131                         TAILQ_REMOVE(&nic_dev->hinic_flow_list,
3132                                 hinic_flow_mem_ptr, entries);
3133                         rte_free(hinic_flow_mem_ptr);
3134                         break;
3135                 }
3136         }
3137         rte_free(flow);
3138
3139         PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
3140                         hinic_global_func_id(nic_dev->hwdev));
3141
3142         return ret;
3143 }
3144
3145 /* Remove all the n-tuple filters */
3146 static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
3147 {
3148         struct hinic_filter_info *filter_info =
3149                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3150         struct hinic_5tuple_filter *p_5tuple;
3151
3152         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
3153                 hinic_remove_5tuple_filter(dev, p_5tuple);
3154 }
3155
3156 /* Remove all the ether type filters */
3157 static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
3158 {
3159         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3160         struct hinic_filter_info *filter_info =
3161                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
3162         int ret = 0;
3163
3164         if (filter_info->type_mask &
3165                 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
3166                 hinic_ethertype_filter_remove(filter_info,
3167                         HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
3168                 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
3169                                         filter_info->qid, false, true);
3170
3171                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
3172         }
3173
3174         if (filter_info->type_mask &
3175                 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
3176                 hinic_ethertype_filter_remove(filter_info,
3177                         HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
3178                 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
3179                         filter_info->qid, false, true);
3180         }
3181
3182         if (ret)
3183                 PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
3184                                 filter_info->pkt_type);
3185 }
3186
3187 /* Remove all the ether type filters */
3188 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
3189 {
3190         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3191         struct hinic_tcam_info *tcam_info =
3192                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
3193         struct hinic_tcam_filter *tcam_filter_ptr;
3194
3195         while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
3196                 (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
3197
3198         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
3199
3200         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
3201 }
3202
3203 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
3204 {
3205         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3206         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3207         struct hinic_fdir_rule_ele *fdir_rule_ptr;
3208         struct hinic_flow_mem *hinic_flow_mem_ptr;
3209         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3210
3211         while ((ntuple_filter_ptr =
3212                         TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
3213                 TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
3214                                  entries);
3215                 rte_free(ntuple_filter_ptr);
3216         }
3217
3218         while ((ethertype_filter_ptr =
3219                         TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
3220                 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3221                                 ethertype_filter_ptr,
3222                                 entries);
3223                 rte_free(ethertype_filter_ptr);
3224         }
3225
3226         while ((fdir_rule_ptr =
3227                         TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
3228                 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
3229                                  entries);
3230                 rte_free(fdir_rule_ptr);
3231         }
3232
3233         while ((hinic_flow_mem_ptr =
3234                         TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
3235                 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
3236                                  entries);
3237                 rte_free(hinic_flow_mem_ptr->flow);
3238                 rte_free(hinic_flow_mem_ptr);
3239         }
3240 }
3241
3242 /* Destroy all flow rules associated with a port on hinic. */
3243 static int hinic_flow_flush(struct rte_eth_dev *dev,
3244                                 __rte_unused struct rte_flow_error *error)
3245 {
3246         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3247
3248         hinic_clear_all_ntuple_filter(dev);
3249         hinic_clear_all_ethertype_filter(dev);
3250         hinic_clear_all_fdir_filter(dev);
3251         hinic_filterlist_flush(dev);
3252
3253         PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
3254                         hinic_global_func_id(nic_dev->hwdev));
3255         return 0;
3256 }
3257
3258 void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
3259 {
3260         hinic_clear_all_ntuple_filter(dev);
3261         hinic_clear_all_ethertype_filter(dev);
3262         hinic_clear_all_fdir_filter(dev);
3263         hinic_filterlist_flush(dev);
3264 }
3265
3266 const struct rte_flow_ops hinic_flow_ops = {
3267         .validate = hinic_flow_validate,
3268         .create = hinic_flow_create,
3269         .destroy = hinic_flow_destroy,
3270         .flush = hinic_flow_flush,
3271 };
3272