drivers/net/hinic/hinic_pmd_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6
#define IP_HEADER_PROTOCOL_TYPE_UDP     17
#define IP_HEADER_PROTOCOL_TYPE_ICMP    1
#define IP_HEADER_PROTOCOL_TYPE_ICMPV6  58

#define FDIR_TCAM_NORMAL_PACKET         0
#define FDIR_TCAM_TUNNEL_PACKET         1

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define TCAM_DIP_IPV4_TYPE      0
#define TCAM_DIP_IPV6_TYPE      1

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->tcam)

enum hinic_atr_flow_type {
        HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
        HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
        HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
        HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
};

/* Structure to store fdir's info. */
struct hinic_fdir_info {
        uint8_t fdir_flag;
        uint8_t qid;
        uint32_t fdir_key;
};

/**
 * An endless loop cannot occur given the following assumptions:
 * 1. there is at least one non-void item (END)
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}

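/*
 * Illustrative sketch, not part of the driver: how the helpers above step
 * over VOID entries. The pattern array and the function name below are
 * hypothetical. With this pattern, next_no_void_pattern(pattern, NULL)
 * returns &pattern[1] (ETH), and passing &pattern[1] as cur returns
 * &pattern[2] (END).
 */
static __rte_unused const struct rte_flow_item *
hinic_example_skip_void(void)
{
        /* Hypothetical pattern: the leading VOID item is skipped */
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_VOID },
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        /* First call starts at &pattern[0] and lands on the ETH item */
        return next_no_void_pattern(pattern, NULL);
}
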
142
143 static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
144                                         struct rte_flow_error *error)
145 {
146         /* Must be input direction */
147         if (!attr->ingress) {
148                 rte_flow_error_set(error, EINVAL,
149                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
150                         attr, "Only support ingress.");
151                 return -rte_errno;
152         }
153
154         if (attr->egress) {
155                 rte_flow_error_set(error, EINVAL,
156                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
157                                 attr, "Not support egress.");
158                 return -rte_errno;
159         }
160
161         if (attr->priority) {
162                 rte_flow_error_set(error, EINVAL,
163                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
164                                 attr, "Not support priority.");
165                 return -rte_errno;
166         }
167
168         if (attr->group) {
169                 rte_flow_error_set(error, EINVAL,
170                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
171                                 attr, "Not support group.");
172                 return -rte_errno;
173         }
174
175         return 0;
176 }
177
178 static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
179                                 const struct rte_flow_item *pattern,
180                                 const struct rte_flow_action *actions,
181                                 struct rte_flow_error *error)
182 {
183         if (!pattern) {
184                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
185                                 NULL, "NULL pattern.");
186                 return -rte_errno;
187         }
188
189         if (!actions) {
190                 rte_flow_error_set(error, EINVAL,
191                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
192                                 NULL, "NULL action.");
193                 return -rte_errno;
194         }
195
196         if (!attr) {
197                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
198                                    NULL, "NULL attribute.");
199                 return -rte_errno;
200         }
201
202         return 0;
203 }
204
205 static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
206                                         struct rte_flow_error *error)
207 {
208         /* The first non-void item should be MAC */
209         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
210                 rte_flow_error_set(error, EINVAL,
211                         RTE_FLOW_ERROR_TYPE_ITEM,
212                         item, "Not supported by ethertype filter");
213                 return -rte_errno;
214         }
215
216         /* Not supported last point for range */
217         if (item->last) {
218                 rte_flow_error_set(error, EINVAL,
219                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
220                         item, "Not supported last point for range");
221                 return -rte_errno;
222         }
223
224         /* Get the MAC info. */
225         if (!item->spec || !item->mask) {
226                 rte_flow_error_set(error, EINVAL,
227                                 RTE_FLOW_ERROR_TYPE_ITEM,
228                                 item, "Not supported by ethertype filter");
229                 return -rte_errno;
230         }
231         return 0;
232 }
233
234 static int
235 hinic_parse_ethertype_aciton(const struct rte_flow_action *actions,
236                         const struct rte_flow_action *act,
237                         const struct rte_flow_action_queue *act_q,
238                         struct rte_eth_ethertype_filter *filter,
239                         struct rte_flow_error *error)
240 {
241         /* Parse action */
242         act = next_no_void_action(actions, NULL);
243         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
244                 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
245                 rte_flow_error_set(error, EINVAL,
246                                 RTE_FLOW_ERROR_TYPE_ACTION,
247                                 act, "Not supported action.");
248                 return -rte_errno;
249         }
250
251         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
252                 act_q = (const struct rte_flow_action_queue *)act->conf;
253                 filter->queue = act_q->index;
254         } else {
255                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
256         }
257
258         /* Check if the next non-void item is END */
259         act = next_no_void_action(actions, act);
260         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
261                 rte_flow_error_set(error, EINVAL,
262                                 RTE_FLOW_ERROR_TYPE_ACTION,
263                                 act, "Not supported action.");
264                 return -rte_errno;
265         }
266
267         return 0;
268 }
269
270 /**
271  * Parse the rule to see if it is a ethertype rule.
272  * And get the ethertype filter info BTW.
273  * pattern:
274  * The first not void item can be ETH.
275  * The next not void item must be END.
276  * action:
277  * The first not void action should be QUEUE.
278  * The next not void action should be END.
279  * pattern example:
280  * ITEM         Spec                    Mask
281  * ETH          type    0x0807          0xFFFF
282  * END
283  * other members in mask and spec should set to 0x00.
284  * item->last should be NULL.
285  */
286 static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
287                         const struct rte_flow_item *pattern,
288                         const struct rte_flow_action *actions,
289                         struct rte_eth_ethertype_filter *filter,
290                         struct rte_flow_error *error)
291 {
292         const struct rte_flow_item *item;
293         const struct rte_flow_action *act = NULL;
294         const struct rte_flow_item_eth *eth_spec;
295         const struct rte_flow_item_eth *eth_mask;
296         const struct rte_flow_action_queue *act_q = NULL;
297
298         if (hinic_check_filter_arg(attr, pattern, actions, error))
299                 return -rte_errno;
300
301         item = next_no_void_pattern(pattern, NULL);
302         if (hinic_check_ethertype_first_item(item, error))
303                 return -rte_errno;
304
305         eth_spec = (const struct rte_flow_item_eth *)item->spec;
306         eth_mask = (const struct rte_flow_item_eth *)item->mask;
307
308         /*
309          * Mask bits of source MAC address must be full of 0.
310          * Mask bits of destination MAC address must be full
311          * of 1 or full of 0.
312          */
313         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
314             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
315              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
316                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
317                                 item, "Invalid ether address mask");
318                 return -rte_errno;
319         }
320
321         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
322                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
323                                 item, "Invalid ethertype mask");
324                 return -rte_errno;
325         }
326
327         /*
328          * If mask bits of destination MAC address
329          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
330          */
331         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
332                 filter->mac_addr = eth_spec->dst;
333                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
334         } else {
335                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
336         }
337         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
338
339         /* Check if the next non-void item is END. */
340         item = next_no_void_pattern(pattern, item);
341         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
342                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
343                         item, "Not supported by ethertype filter.");
344                 return -rte_errno;
345         }
346
347         if (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error))
348                 return -rte_errno;
349
350         if (hinic_check_ethertype_attr_ele(attr, error))
351                 return -rte_errno;
352
353         return 0;
354 }
355
356 static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
357                         const struct rte_flow_attr *attr,
358                         const struct rte_flow_item pattern[],
359                         const struct rte_flow_action actions[],
360                         struct rte_eth_ethertype_filter *filter,
361                         struct rte_flow_error *error)
362 {
363         if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
364                 return -rte_errno;
365
366         /* NIC doesn't support MAC address. */
367         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
368                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
369                 rte_flow_error_set(error, EINVAL,
370                         RTE_FLOW_ERROR_TYPE_ITEM,
371                         NULL, "Not supported by ethertype filter");
372                 return -rte_errno;
373         }
374
375         if (filter->queue >= dev->data->nb_rx_queues) {
376                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
377                 rte_flow_error_set(error, EINVAL,
378                         RTE_FLOW_ERROR_TYPE_ITEM,
379                         NULL, "Queue index much too big");
380                 return -rte_errno;
381         }
382
383         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
384                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
385                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
386                 rte_flow_error_set(error, EINVAL,
387                         RTE_FLOW_ERROR_TYPE_ITEM,
388                         NULL, "IPv4/IPv6 not supported by ethertype filter");
389                 return -rte_errno;
390         }
391
392         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
393                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
394                 rte_flow_error_set(error, EINVAL,
395                         RTE_FLOW_ERROR_TYPE_ITEM,
396                         NULL, "Drop option is unsupported");
397                 return -rte_errno;
398         }
399
400         /* Hinic only support LACP/ARP for ether type */
401         if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
402                 filter->ether_type != RTE_ETHER_TYPE_ARP) {
403                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
404                 rte_flow_error_set(error, EINVAL,
405                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
406                         "only lacp/arp type supported by ethertype filter");
407                 return -rte_errno;
408         }
409
410         return 0;
411 }
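
/*
 * Usage sketch, not part of the driver: one pattern/action set the parser
 * above accepts -- an ARP ethertype match steered to queue 0. The function
 * name and the local variables are hypothetical; it simply exercises
 * hinic_parse_ethertype_filter() with a well-formed rule.
 */
static __rte_unused int
hinic_example_ethertype_rule(struct rte_eth_dev *dev,
                        struct rte_flow_error *error)
{
        struct rte_eth_ethertype_filter filter;
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Match ethertype 0x0806 (ARP) exactly; MAC masks stay zero */
        struct rte_flow_item_eth eth_spec = {
                .type = RTE_BE16(RTE_ETHER_TYPE_ARP),
        };
        struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .spec = &eth_spec,
                        .mask = &eth_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &queue,
                },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        /* Returns 0 and fills filter.ether_type/filter.queue on success */
        return hinic_parse_ethertype_filter(dev, &attr, pattern,
                                        actions, &filter, error);
}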

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* All ICMP fields should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item can be UDP, TCP or ICMP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP      src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}

static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support TCP flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic only supports priorities 1 to 7 */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}
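
/*
 * Usage sketch, not part of the driver: a 5-tuple rule the parser above
 * accepts, matching the doc comment's example -- IPv4 192.168.1.20 ->
 * 192.167.3.50, TCP port 80 -> 80, steered to queue 1. The function name
 * and the concrete values are hypothetical.
 */
static __rte_unused int
hinic_example_ntuple_rule(struct rte_eth_dev *dev,
                        struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter filter;
        struct rte_flow_attr attr = { .priority = 1, .ingress = 1 };
        struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
                .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
                .hdr.next_proto_id = IP_HEADER_PROTOCOL_TYPE_TCP,
        };
        /* Only the addresses and protocol may be unmasked */
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr.src_addr = RTE_BE32(0xFFFFFFFF),
                .hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
                .hdr.next_proto_id = 0xFF,
        };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.src_port = RTE_BE16(80),
                .hdr.dst_port = RTE_BE16(80),
        };
        /* tcp_flags mask stays 0 so RTE_NTUPLE_FLAGS_TCP_FLAG is not set */
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.src_port = RTE_BE16(0xFFFF),
                .hdr.dst_port = RTE_BE16(0xFFFF),
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_IPV4,
                        .spec = &ipv4_spec,
                        .mask = &ipv4_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_TCP,
                        .spec = &tcp_spec,
                        .mask = &tcp_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return hinic_parse_ntuple_filter(dev, &attr, pattern,
                                        actions, &filter, error);
}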

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 or TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter, support mac, ipv4, tcp, udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 or IPv6 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                        item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support mac, ipv4, ipv6");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec;
        const struct rte_flow_item_ipv6 *ipv6_mask;
        const struct rte_flow_item *item = *in_out_item;
        int i;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src, dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
                rule->mode = HINIC_FDIR_MODE_NORMAL;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP, UDP, ICMP, ANY or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, icmp, any, end");
                        return -rte_errno;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;

                /* Only support dst addresses, others should be masked */
                if (ipv6_mask->hdr.vtc_flow ||
                    ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.proto ||
                    ipv6_mask->hdr.hop_limits) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support dst ipv6");
                        return -rte_errno;
                }

                /* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
                for (i = 0; i < 16; i++) {
                        if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Not supported by fdir filter, do not support src ipv6");
                                return -rte_errno;
                        }
                }

                if (!item->spec) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, ipv6 spec is NULL");
                        return -rte_errno;
                }

                for (i = 0; i < 16; i++) {
                        if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
                                rule->mask.dst_ipv6_mask |= 1 << i;
                }

                ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
                rte_memcpy(rule->hinic_fdir.dst_ipv6,
                           ipv6_spec->hdr.dst_addr, 16);

                /*
                 * Check if the next not void item is
                 * TCP, UDP, ICMP or ICMPV6.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP6) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, icmp, icmpv6");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                        __rte_unused const struct rte_flow_item pattern[],
                        __rte_unused struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by normal fdir filter, not support l4");
                return -rte_errno;
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
            hinic_normal_item_check_ip(&item, pattern, rule, error) ||
            hinic_normal_item_check_l4(&item, pattern, rule, error) ||
            hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}
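
/*
 * Usage sketch, not part of the driver: a normal-mode fdir pattern the
 * checker above accepts -- a fully-masked destination IPv4 address and no
 * L4 item. The function name and the address are hypothetical.
 */
static __rte_unused int
hinic_example_fdir_normal_rule(struct rte_flow_error *error)
{
        struct hinic_fdir_rule rule;
        const struct rte_flow_item *item = NULL;
        struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
        };
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
        };
        struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_IPV4,
                        .spec = &ipv4_spec,
                        .mask = &ipv4_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        memset(&rule, 0, sizeof(rule));
        /* On success, rule.mode == HINIC_FDIR_MODE_NORMAL and
         * rule.hinic_fdir.dst_ip holds the big-endian address.
         */
        return hinic_check_normal_item_ele(item, pattern, &rule, error);
}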

static int
hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir normal tcam filter");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
                rule->mask.src_port_mask = tcp_mask->hdr.src_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                if (item->spec) {
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
                        rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                /*
                 * Only care about src & dst ports,
                 * others should be masked.
                 */
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                udp_mask = (const struct rte_flow_item_udp *)item->mask;
                if (udp_mask->hdr.dgram_len ||
                        udp_mask->hdr.dgram_cksum) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support udp");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.src_port_mask = udp_mask->hdr.src_port;
                rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
                if (item->spec) {
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
                        rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
                }
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam normal, l4 only support icmp, icmpv6, any, tcp, udp");
                return -rte_errno;
        }

        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter tcam normal, support end");
                return -rte_errno;
        }

        /* Store the END item for the caller */
        *in_out_item = item;

        return 0;
}

static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}
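
/*
 * Usage sketch, not part of the driver: a TCAM-mode pattern the checker
 * above accepts -- a fully-masked IPv4 source address plus a UDP
 * destination port. The function name and the values are hypothetical.
 */
static __rte_unused int
hinic_example_fdir_tcam_rule(struct rte_flow_error *error)
{
        struct hinic_fdir_rule rule;
        const struct rte_flow_item *item = NULL;
        struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
        };
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr.src_addr = RTE_BE32(0xFFFFFFFF),
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr.dst_port = RTE_BE16(4789),
        };
        /* dgram_len and dgram_cksum must stay masked (zero) */
        struct rte_flow_item_udp udp_mask = {
                .hdr.dst_port = RTE_BE16(0xFFFF),
        };
        struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_IPV4,
                        .spec = &ipv4_spec,
                        .mask = &ipv4_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_UDP,
                        .spec = &udp_spec,
                        .mask = &udp_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        memset(&rule, 0, sizeof(rule));
        /* On success, rule.mode == HINIC_FDIR_MODE_TCAM with the UDP
         * ports and protocol recorded in rule.hinic_fdir.
         */
        return hinic_check_tcam_normal_item_ele(item, pattern, &rule, error);
}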

static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support vxlan");
                        return -rte_errno;
                }

                *in_out_item = item;
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
                return -rte_errno;
        }

        return 0;
}

static int
hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                *in_out_item = item;
        }

        return 0;
}
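
/*
 * Usage sketch, not part of the driver: the two checkers above walk an
 * outer-UDP/VXLAN chain. For a hypothetical ETH / IPV4 / UDP / VXLAN /
 * ANY / END pattern, starting from the outer UDP item,
 * hinic_tunnel_item_check_l4() advances to the VXLAN item and
 * hinic_tunnel_item_check_vxlan() then advances to the inner (ANY) item.
 * The function name is hypothetical.
 */
static __rte_unused int
hinic_example_fdir_tunnel_walk(struct rte_flow_error *error)
{
        struct hinic_fdir_rule rule;
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
                { .type = RTE_FLOW_ITEM_TYPE_ANY },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_item *item = &pattern[2]; /* outer UDP */

        memset(&rule, 0, sizeof(rule));
        if (hinic_tunnel_item_check_l4(&item, pattern, &rule, error))
                return -rte_errno;

        /* item now points at the VXLAN item */
        return hinic_tunnel_item_check_vxlan(&item, pattern, &rule, error);
}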

static int
hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Not supported last point for range */
                if (item->last) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                /* get the TCP/UDP info */
                if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.tcp_flags ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                (void)memset(rule, 0,
                                        sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support tcp");
                                return -rte_errno;
                        }

                        rule->mode = HINIC_FDIR_MODE_TCAM;
                        rule->mask.tunnel_flag = UINT16_MAX;
                        rule->mask.tunnel_inner_src_port_mask =
                                                        tcp_mask->hdr.src_port;
                        rule->mask.tunnel_inner_dst_port_mask =
                                                        tcp_mask->hdr.dst_port;
                        rule->mask.proto_mask = UINT16_MAX;

                        rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                        if (item->spec) {
                                tcp_spec =
                                (const struct rte_flow_item_tcp *)item->spec;
                                rule->hinic_fdir.tunnel_inner_src_port =
                                                        tcp_spec->hdr.src_port;
                                rule->hinic_fdir.tunnel_inner_dst_port =
                                                        tcp_spec->hdr.dst_port;
                        }
                } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support udp");
1333                                 return -rte_errno;
1334                         }
1335
1336                         rule->mode = HINIC_FDIR_MODE_TCAM;
1337                         rule->mask.tunnel_flag = UINT16_MAX;
1338                         rule->mask.tunnel_inner_src_port_mask =
1339                                                         udp_mask->hdr.src_port;
1340                         rule->mask.tunnel_inner_dst_port_mask =
1341                                                         udp_mask->hdr.dst_port;
1342                         rule->mask.proto_mask = UINT16_MAX;
1343
1344                         rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1345                         if (item->spec) {
1346                                 udp_spec =
1347                                 (const struct rte_flow_item_udp *)item->spec;
1348                                 rule->hinic_fdir.tunnel_inner_src_port =
1349                                                         udp_spec->hdr.src_port;
1350                                 rule->hinic_fdir.tunnel_inner_dst_port =
1351                                                         udp_spec->hdr.dst_port;
1352                         }
1353                 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
1354                         rule->mode = HINIC_FDIR_MODE_TCAM;
1355                         rule->mask.tunnel_flag = UINT16_MAX;
1356                 } else {
1357                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
1358                         rte_flow_error_set(error, EINVAL,
1359                                 RTE_FLOW_ERROR_TYPE_ITEM,
1360                                 item, "Not supported by fdir filter, support tcp/udp");
1361                         return -rte_errno;
1362                 }
1363
1364                 /* get next no void item */
1365                 *in_out_item = next_no_void_pattern(pattern, item);
1366         }
1367
1368         return 0;
1369 }
1370
1371 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1372                                         const struct rte_flow_item pattern[],
1373                                         struct hinic_fdir_rule *rule,
1374                                         struct rte_flow_error *error)
1375 {
1376         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1377                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1378                 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1379                 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1380                 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1381                 hinic_normal_item_check_end(item, rule, error))
1382                 return -rte_errno;
1383
1384         return 0;
1385 }
1386
1387 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1388                                         struct hinic_fdir_rule *rule,
1389                                         struct rte_flow_error *error)
1390 {
1391         /* Must be input direction */
1392         if (!attr->ingress) {
1393                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1394                 rte_flow_error_set(error, EINVAL,
1395                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1396                                    attr, "Only support ingress.");
1397                 return -rte_errno;
1398         }
1399
1400         /* Not supported */
1401         if (attr->egress) {
1402                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1403                 rte_flow_error_set(error, EINVAL,
1404                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1405                                    attr, "Not support egress.");
1406                 return -rte_errno;
1407         }
1408
1409         /* Not supported */
1410         if (attr->priority) {
1411                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1412                 rte_flow_error_set(error, EINVAL,
1413                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1414                         attr, "Not support priority.");
1415                 return -rte_errno;
1416         }
1417
1418         return 0;
1419 }
1420
1421 static int hinic_check_normal_act_ele(__rte_unused const struct rte_flow_item *item,
1422                                 const struct rte_flow_action actions[],
1423                                 struct hinic_fdir_rule *rule,
1424                                 struct rte_flow_error *error)
1425 {
1426         const struct rte_flow_action *act;
1427
1428         /* Check if the first not void action is QUEUE */
1429         act = next_no_void_action(actions, NULL);
1430         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1431                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1432                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1433                         item, "Not supported action.");
1434                 return -rte_errno;
1435         }
1436
1437         rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1438
1439         /* Check if the next not void item is END */
1440         act = next_no_void_action(actions, act);
1441         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1442                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1443                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1444                         act, "Not supported action.");
1445                 return -rte_errno;
1446         }
1447
1448         return 0;
1449 }
1450
1451 /**
1452  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1453  * and fill in the flow director filter info along the way.
1454  * UDP/TCP/SCTP PATTERN:
1455  * The first not void item can be ETH, IPV4 or IPV6.
1456  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1457  * The next not void item can be UDP or TCP (optional).
1458  * The next not void item must be END.
1459  * ACTION:
1460  * The first not void action must be QUEUE,
1461  * which selects the destination receive queue;
1462  * MARK and other actions are not supported.
1463  * The next not void action must be END.
1464  * UDP/TCP pattern example:
1465  * ITEM           Spec                            Mask
1466  * ETH            NULL                            NULL
1467  * IPV4           src_addr  1.2.3.6               0xFFFFFFFF
1468  *                dst_addr  1.2.3.5               0xFFFFFFFF
1469  * UDP/TCP        src_port  80                    0xFFFF
1470  *                dst_port  80                    0xFFFF
1471  * END
1472  * Other members in mask and spec should be set to 0x00.
1473  * Item->last should be NULL.
1474  */
1475 static int
1476 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1477                                const struct rte_flow_item pattern[],
1478                                const struct rte_flow_action actions[],
1479                                struct hinic_fdir_rule *rule,
1480                                struct rte_flow_error *error)
1481 {
1482         const struct rte_flow_item *item = NULL;
1483
1484         if (hinic_check_filter_arg(attr, pattern, actions, error))
1485                 return -rte_errno;
1486
1487         if (hinic_check_normal_item_ele(item, pattern, rule, error))
1488                 return -rte_errno;
1489
1490         if (hinic_check_normal_attr_ele(attr, rule, error))
1491                 return -rte_errno;
1492
1493         if (hinic_check_normal_act_ele(item, actions, rule, error))
1494                 return -rte_errno;
1495
1496         return 0;
1497 }
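/*
 * Illustrative sketch (not driver code): the kind of pattern/action arrays
 * an application might pass down to this parser for the IPV4/UDP example
 * documented above. The addresses, ports and queue index are assumptions
 * chosen for the example only; RTE_IPV4() is the address builder from
 * rte_ip.h.
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr = { .src_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 6)),
 *                    .dst_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 5)) } };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr = { .src_addr = RTE_BE32(UINT32_MAX),
 *                    .dst_addr = RTE_BE32(UINT32_MAX) } };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr = { .src_port = RTE_BE16(UINT16_MAX),
 *                    .dst_port = RTE_BE16(UINT16_MAX) } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END } };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END } };
 */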
1498
1499 /**
1500  * Parse the rule to see if it is a TCAM normal flow director rule,
1501  * and fill in the flow director filter info along the way.
1502  * UDP/TCP/SCTP PATTERN:
1503  * The first not void item can be ETH, IPV4 or IPV6.
1504  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1505  * The next not void item can be ANY, TCP or UDP.
1506  * ACTION:
1507  * The first not void action must be QUEUE,
1508  * which selects the destination receive queue;
1509  * MARK and other actions are not supported.
1510  * The next not void action must be END.
1511  * UDP/TCP pattern example:
1512  * ITEM           Spec                            Mask
1513  * ETH            NULL                            NULL
1514  * IPV4           src_addr  1.2.3.6               0xFFFFFFFF
1515  *                dst_addr  1.2.3.5               0xFFFFFFFF
1516  * UDP/TCP        src_port  80                    0xFFFF
1517  *                dst_port  80                    0xFFFF
1518  * END
1519  * Other members in mask and spec should be set to 0x00.
1520  * Item->last should be NULL.
1521  */
1522 static int
1523 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1524                                const struct rte_flow_item pattern[],
1525                                const struct rte_flow_action actions[],
1526                                struct hinic_fdir_rule *rule,
1527                                struct rte_flow_error *error)
1528 {
1529         const struct rte_flow_item *item = NULL;
1530
1531         if (hinic_check_filter_arg(attr, pattern, actions, error))
1532                 return -rte_errno;
1533
1534         if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1535                 return -rte_errno;
1536
1537         if (hinic_check_normal_attr_ele(attr, rule, error))
1538                 return -rte_errno;
1539
1540         if (hinic_check_normal_act_ele(item, actions, rule, error))
1541                 return -rte_errno;
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * Parse the rule to see if it is a TCAM tunnel flow director rule,
1548  * and fill in the flow director filter info along the way.
1549  * VXLAN TUNNEL PATTERN:
1550  * The first not void item can be ETH, IPV4 or IPV6.
1551  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1552  * The next not void item must be UDP (the outer transport header).
1553  * The next not void item must be VXLAN.
1554  * The next not void item, matching the inner headers,
1555  * must be ANY, UDP or TCP.
1556  * The next not void item must be END.
1557  * ACTION:
1558  * The first not void action must be QUEUE,
1559  * which selects the destination receive queue;
1560  * MARK and other actions are not supported.
1561  * The next not void action must be END.
1562  * UDP/TCP pattern example:
1563  * ITEM           Spec                            Mask
1564  * ETH            NULL                            NULL
1565  * IPV4           src_addr  1.2.3.6               0xFFFFFFFF
1566  *                dst_addr  1.2.3.5               0xFFFFFFFF
1567  * UDP            NULL                            NULL
1568  * VXLAN          NULL                            NULL
1569  * UDP/TCP        src_port  80                    0xFFFF
1570  *                dst_port  80                    0xFFFF
1571  * END
1572  * Other members in mask and spec should be set to 0x00.
1573  * Item->last should be NULL.
1574  */
1575 static int
1576 hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
1577                                const struct rte_flow_item pattern[],
1578                                const struct rte_flow_action actions[],
1579                                struct hinic_fdir_rule *rule,
1580                                struct rte_flow_error *error)
1581 {
1582         const struct rte_flow_item *item = NULL;
1583
1584         if (hinic_check_filter_arg(attr, pattern, actions, error))
1585                 return -rte_errno;
1586
1587         if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1588                 return -rte_errno;
1589
1590         if (hinic_check_normal_attr_ele(attr, rule, error))
1591                 return -rte_errno;
1592
1593         if (hinic_check_normal_act_ele(item, actions, rule, error))
1594                 return -rte_errno;
1595
1596         return 0;
1597 }
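/*
 * Illustrative sketch (not driver code): a VXLAN tunnel pattern of the
 * shape this parser accepts, steering inner TCP port 80 to a queue. The
 * port values are assumptions for the example only; outer IPv4 addresses
 * could be matched as in the normal-rule example above, and the actions
 * would be the same QUEUE/END pair.
 *
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr = { .src_port = RTE_BE16(UINT16_MAX),
 *                    .dst_port = RTE_BE16(UINT16_MAX) } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END } };
 */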
1598
1599 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1600                         const struct rte_flow_attr *attr,
1601                         const struct rte_flow_item pattern[],
1602                         const struct rte_flow_action actions[],
1603                         struct hinic_fdir_rule *rule,
1604                         struct rte_flow_error *error)
1605 {
1606         int ret;
1607
1608         ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1609                                                 rule, error);
1610         if (!ret)
1611                 goto step_next;
1612
1613         ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1614                                                 rule, error);
1615         if (!ret)
1616                 goto step_next;
1617
1618         ret = hinic_parse_fdir_filter_tcam_tunnel(attr, pattern, actions,
1619                                                 rule, error);
1620         if (ret)
1621                 return ret;
1622
1623 step_next:
1624         if (rule->queue >= dev->data->nb_rx_queues)
1625                 return -ENOTSUP;
1626
1627         return ret;
1628 }
1629
1630 /**
1631  * Check if the flow rule is supported by the NIC.
1632  * It only checks the format; it does not guarantee that the rule can be
1633  * programmed into the HW, because there may not be enough room for it.
1634  */
1635 static int hinic_flow_validate(struct rte_eth_dev *dev,
1636                                 const struct rte_flow_attr *attr,
1637                                 const struct rte_flow_item pattern[],
1638                                 const struct rte_flow_action actions[],
1639                                 struct rte_flow_error *error)
1640 {
1641         struct rte_eth_ethertype_filter ethertype_filter;
1642         struct rte_eth_ntuple_filter ntuple_filter;
1643         struct hinic_fdir_rule fdir_rule;
1644         int ret;
1645
1646         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1647         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1648                                 actions, &ntuple_filter, error);
1649         if (!ret)
1650                 return 0;
1651
1652         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1653         ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1654                                 actions, &ethertype_filter, error);
1655
1656         if (!ret)
1657                 return 0;
1658
1659         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1660         ret = hinic_parse_fdir_filter(dev, attr, pattern,
1661                                 actions, &fdir_rule, error);
1662
1663         return ret;
1664 }
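/*
 * Illustrative sketch (not driver code): applications reach the validation
 * cascade above through the generic rte_flow API, with pattern/actions as
 * in the examples above and port_id assumed to be a valid hinic port.
 *
 *   struct rte_flow_error err;
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow *flow = NULL;
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */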
1665
1666 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1667                  struct hinic_5tuple_filter_info *hinic_filter_info)
1668 {
1669         switch (filter->dst_ip_mask) {
1670         case UINT32_MAX:
1671                 hinic_filter_info->dst_ip_mask = 0;
1672                 hinic_filter_info->dst_ip = filter->dst_ip;
1673                 break;
1674         case 0:
1675                 hinic_filter_info->dst_ip_mask = 1;
1676                 hinic_filter_info->dst_ip = 0;
1677                 break;
1678         default:
1679                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1680                 return -EINVAL;
1681         }
1682
1683         switch (filter->src_ip_mask) {
1684         case UINT32_MAX:
1685                 hinic_filter_info->src_ip_mask = 0;
1686                 hinic_filter_info->src_ip = filter->src_ip;
1687                 break;
1688         case 0:
1689                 hinic_filter_info->src_ip_mask = 1;
1690                 hinic_filter_info->src_ip = 0;
1691                 break;
1692         default:
1693                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1694                 return -EINVAL;
1695         }
1696         return 0;
1697 }
1698
1699 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1700                    struct hinic_5tuple_filter_info *hinic_filter_info)
1701 {
1702         switch (filter->dst_port_mask) {
1703         case UINT16_MAX:
1704                 hinic_filter_info->dst_port_mask = 0;
1705                 hinic_filter_info->dst_port = filter->dst_port;
1706                 break;
1707         case 0:
1708                 hinic_filter_info->dst_port_mask = 1;
1709                 hinic_filter_info->dst_port = 0;
1710                 break;
1711         default:
1712                 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1713                 return -EINVAL;
1714         }
1715
1716         switch (filter->src_port_mask) {
1717         case UINT16_MAX:
1718                 hinic_filter_info->src_port_mask = 0;
1719                 hinic_filter_info->src_port = filter->src_port;
1720                 break;
1721         case 0:
1722                 hinic_filter_info->src_port_mask = 1;
1723                 hinic_filter_info->src_port = 0;
1724                 break;
1725         default:
1726                 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1727                 return -EINVAL;
1728         }
1729
1730         return 0;
1731 }
1732
1733 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1734                     struct hinic_5tuple_filter_info *hinic_filter_info)
1735 {
1736         switch (filter->proto_mask) {
1737         case UINT8_MAX:
1738                 hinic_filter_info->proto_mask = 0;
1739                 hinic_filter_info->proto = filter->proto;
1740                 break;
1741         case 0:
1742                 hinic_filter_info->proto_mask = 1;
1743                 hinic_filter_info->proto = 0;
1744                 break;
1745         default:
1746                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1747                 return -EINVAL;
1748         }
1749
1750         return 0;
1751 }
1752
1753 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1754                         struct hinic_5tuple_filter_info *filter_info)
1755 {
1756         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1757                 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1758                 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1759                 return -EINVAL;
1760
1761         if (ntuple_ip_filter(filter, filter_info) ||
1762                 ntuple_port_filter(filter, filter_info) ||
1763                 ntuple_proto_filter(filter, filter_info))
1764                 return -EINVAL;
1765
1766         filter_info->priority = (uint8_t)filter->priority;
1767         return 0;
1768 }
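/*
 * Reading aid for the conversions above (comment only, not driver logic):
 * rte_eth_ntuple_filter masks use all-ones for "match this field exactly"
 * and zero for "ignore it", while the hinic_5tuple_filter_info *_mask
 * fields are inverted flags where 0 means the field is compared and 1
 * means it is wildcarded. E.g. an application dst_ip_mask of UINT32_MAX
 * becomes hinic dst_ip_mask == 0 with dst_ip holding the value to match;
 * an application dst_ip_mask of 0 becomes hinic dst_ip_mask == 1.
 */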
1769
1770 static inline struct hinic_5tuple_filter *
1771 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1772                            struct hinic_5tuple_filter_info *key)
1773 {
1774         struct hinic_5tuple_filter *it;
1775
1776         TAILQ_FOREACH(it, filter_list, entries) {
1777                 if (memcmp(key, &it->filter_info,
1778                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1779                         return it;
1780                 }
1781         }
1782
1783         return NULL;
1784 }
1785
1786 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1787 {
1788         struct tag_pa_rule lacp_rule;
1789         struct tag_pa_action lacp_action;
1790
1791         memset(&lacp_rule, 0, sizeof(lacp_rule));
1792         memset(&lacp_action, 0, sizeof(lacp_action));
1793         /* LACP TCAM rule */
1794         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1795         lacp_rule.l2_header.eth_type.val16 = 0x8809;
1796         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1797
1798         /* LACP TCAM action */
1799         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1800         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1801         lacp_action.pkt_type = PKT_LACP_TYPE;
1802         lacp_action.pri = 0x0;
1803         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1804
1805         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1806                                         &lacp_rule, &lacp_action);
1807 }
1808
1809 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1810 {
1811         struct tag_pa_rule bgp_rule;
1812         struct tag_pa_action bgp_action;
1813
1814         memset(&bgp_rule, 0, sizeof(bgp_rule));
1815         memset(&bgp_action, 0, sizeof(bgp_action));
1816         /* BGP TCAM rule */
1817         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1818         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1819         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1820         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1821         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1822         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1823
1824         /* BGP TCAM action */
1825         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1826         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1827         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1828         bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1829                                * results, not need to convert
1830                                */
1831         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1832
1833         return hinic_set_fdir_tcam(nic_dev->hwdev,
1834                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1835 }
1836
1837 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1838 {
1839         struct tag_pa_rule bgp_rule;
1840         struct tag_pa_action bgp_action;
1841
1842         memset(&bgp_rule, 0, sizeof(bgp_rule));
1843         memset(&bgp_action, 0, sizeof(bgp_action));
1844         /* BGP TCAM rule */
1845         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1846         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1847         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1848         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1849         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1850         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1851
1852         /* BGP TCAM action */
1853         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1854         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1855         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1856         bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1857                                * results, not need to convert
1858                                */
1859         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1860
1861         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1862                                         &bgp_rule, &bgp_action);
1863 }
1864
1865 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1866 {
1867         struct tag_pa_rule vrrp_rule;
1868         struct tag_pa_action vrrp_action;
1869
1870         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1871         memset(&vrrp_action, 0, sizeof(vrrp_action));
1872         /* VRRP TCAM rule */
1873         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1874         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1875         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1876         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1877
1878         /* VRRP TCAM action */
1879         vrrp_action.err_type = 0x3f;
1880         vrrp_action.fwd_action = 0x7;
1881         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1882         vrrp_action.pri = 0xf;
1883         vrrp_action.push_len = 0xf;
1884
1885         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1886                                         &vrrp_rule, &vrrp_action);
1887 }
1888
1889 /**
1890  * Clear all fdir configuration; errors from the individual
1891  * firmware/TCAM clear operations are ignored (best-effort cleanup).
1892  *
1893  * @param nic_dev
1894  *   The hardware interface of an Ethernet device.
1895  *
1896  * @return
1897  *   None.
1898  */
1899 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1900 {
1901         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1902
1903         (void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
1904
1905         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1906
1907         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1908
1909         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1910
1911         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1912
1913         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
1914 }
1915
1916 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1917                        struct hinic_filter_info *filter_info)
1918 {
1919         switch (filter->filter_info.proto) {
1920         case IPPROTO_TCP:
1921                 /* Filter type is bgp type if dst_port or src_port is 179 */
1922                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1923                         !(filter->filter_info.dst_port_mask)) {
1924                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1925                 } else if (filter->filter_info.src_port ==
1926                         RTE_BE16(BGP_DPORT_ID) &&
1927                         !(filter->filter_info.src_port_mask)) {
1928                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1929                 } else {
1930                         PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
1931                         " just support BGP now, proto:0x%x, "
1932                         "dst_port:0x%x, dst_port_mask:0x%x."
1933                         "src_port:0x%x, src_port_mask:0x%x.",
1934                         filter->filter_info.proto,
1935                         filter->filter_info.dst_port,
1936                         filter->filter_info.dst_port_mask,
1937                         filter->filter_info.src_port,
1938                         filter->filter_info.src_port_mask);
1939                         return -EINVAL;
1940                 }
1941                 break;
1942
1943         case IPPROTO_VRRP:
1944                 filter_info->pkt_type = PKT_VRRP_TYPE;
1945                 break;
1946
1947         case IPPROTO_ICMP:
1948                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1949                 break;
1950
1951         case IPPROTO_ICMPV6:
1952                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1953                 break;
1954
1955         default:
1956                 PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
1957                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1958                 "src_port: 0x%x, src_port_mask: 0x%x.",
1959                 filter->filter_info.proto, filter->filter_info.dst_port,
1960                 filter->filter_info.dst_port_mask,
1961                 filter->filter_info.src_port,
1962                 filter->filter_info.src_port_mask);
1963                 return -EINVAL;
1964         }
1965
1966         return 0;
1967 }
1968
1969 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1970                         struct hinic_filter_info *filter_info, int *index)
1971 {
1972         int type_id;
1973
1974         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1975
1976         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1977                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
1978                 return -EINVAL;
1979         }
1980
1981         if (!(filter_info->type_mask & (1 << type_id))) {
1982                 filter_info->type_mask |= 1 << type_id;
1983                 filter->index = type_id;
1984                 filter_info->pkt_filters[type_id].enable = true;
1985                 filter_info->pkt_filters[type_id].pkt_proto =
1986                                                 filter->filter_info.proto;
1987                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1988                                   filter, entries);
1989         } else {
1990                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1991                 return -EIO;
1992         }
1993
1994         *index = type_id;
1995         return 0;
1996 }
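/*
 * Reading aid (comment only): type_mask is a bitmap of occupied
 * filter-type slots, indexed by the id derived from the packet type. Per
 * the check above there are HINIC_MAX_Q_FILTERS (64, per the log message)
 * slots:
 *
 *   filter_info->type_mask |= 1 << type_id;     occupy slot on insert
 *   filter_info->type_mask & (1 << type_id);    nonzero while slot in use
 *   filter_info->type_mask &= ~(1 << type_id);  free slot on removal
 */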
1997
1998 /*
1999  * Add a 5tuple filter
2000  *
2001  * @param dev
2002  *  Pointer to struct rte_eth_dev.
2003  * @param filter
2004  *  Pointer to the filter that will be added.
2005  * @return
2006  *    - On success, zero.
2007  *    - On failure, a negative value.
2008  */
2009 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
2010                                 struct hinic_5tuple_filter *filter)
2011 {
2012         struct hinic_filter_info *filter_info =
2013                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2014         int i, ret_fw;
2015         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2016
2017         if (hinic_filter_info_init(filter, filter_info) ||
2018                 hinic_lookup_new_filter(filter, filter_info, &i))
2019                 return -EFAULT;
2020
2021         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2022                                         filter_info->qid,
2023                                         filter_info->pkt_filters[i].enable,
2024                                         true);
2025         if (ret_fw) {
2026                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2027                         filter_info->pkt_type, filter->queue,
2028                         filter_info->pkt_filters[i].enable);
2029                 return -EFAULT;
2030         }
2031
2032         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2033                         filter_info->pkt_type, filter_info->qid,
2034                         filter_info->pkt_filters[filter->index].enable);
2035
2036         switch (filter->filter_info.proto) {
2037         case IPPROTO_TCP:
2038                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
2039                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
2040                         if (ret_fw) {
2041                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
2042                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
2043                                         filter_info->pkt_type, filter->queue,
2044                                         filter_info->pkt_filters[i].enable);
2045                                 return -EFAULT;
2046                         }
2047
2048                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
2049                                 filter->queue,
2050                                 filter_info->pkt_filters[i].enable);
2051                 } else if (filter->filter_info.src_port ==
2052                         RTE_BE16(BGP_DPORT_ID)) {
2053                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
2054                         if (ret_fw) {
2055                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
2056                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
2057                                         filter_info->pkt_type, filter->queue,
2058                                         filter_info->pkt_filters[i].enable);
2059                                 return -EFAULT;
2060                         }
2061
2062                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
2063                                         filter->queue,
2064                                         filter_info->pkt_filters[i].enable);
2065                 }
2066
2067                 break;
2068
2069         case IPPROTO_VRRP:
2070                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
2071                 if (ret_fw) {
2072                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
2073                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
2074                                 filter_info->pkt_type, filter->queue,
2075                                 filter_info->pkt_filters[i].enable);
2076                         return -EFAULT;
2077                 }
2078                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
2079                                 filter->queue,
2080                                 filter_info->pkt_filters[i].enable);
2081                 break;
2082
2083         default:
2084                 break;
2085         }
2086
2087         return 0;
2088 }
2089
2090 /*
2091  * Remove a 5tuple filter
2092  *
2093  * @param dev
2094  *  Pointer to struct rte_eth_dev.
2095  * @param filter
2096  *  Pointer to the filter to be removed.
2097  */
2098 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2099                            struct hinic_5tuple_filter *filter)
2100 {
2101         struct hinic_filter_info *filter_info =
2102                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2103         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2104
2105         switch (filter->filter_info.proto) {
2106         case IPPROTO_VRRP:
2107                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2108                 break;
2109
2110         case IPPROTO_TCP:
2111                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2112                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2113                                                         TCAM_PKT_BGP_DPORT);
2114                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2115                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2116                                                         TCAM_PKT_BGP_SPORT);
2117                 break;
2118
2119         default:
2120                 break;
2121         }
2122
2123         hinic_filter_info_init(filter, filter_info);
2124
2125         filter_info->pkt_filters[filter->index].enable = false;
2126         filter_info->pkt_filters[filter->index].pkt_proto = 0;
2127
2128         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2129                 filter_info->pkt_type,
2130                 filter_info->pkt_filters[filter->index].qid,
2131                 filter_info->pkt_filters[filter->index].enable);
2132         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2133                                 filter_info->pkt_filters[filter->index].qid,
2134                                 filter_info->pkt_filters[filter->index].enable,
2135                                 true);
2136
2137         filter_info->pkt_type = 0;
2138         filter_info->qid = 0;
2139         filter_info->pkt_filters[filter->index].qid = 0;
2140         filter_info->type_mask &= ~(1 <<  (filter->index));
2141         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2142
2143         rte_free(filter);
2144 }
2145
2146 /*
2147  * Add or delete a ntuple filter
2148  *
2149  * @param dev
2150  *  Pointer to struct rte_eth_dev.
2151  * @param ntuple_filter
2152  *  Pointer to struct rte_eth_ntuple_filter
2153  * @param add
2154  *  If true, add filter; if false, remove filter
2155  * @return
2156  *    - On success, zero.
2157  *    - On failure, a negative value.
2158  */
2159 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2160                                 struct rte_eth_ntuple_filter *ntuple_filter,
2161                                 bool add)
2162 {
2163         struct hinic_filter_info *filter_info =
2164                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2165         struct hinic_5tuple_filter_info filter_5tuple;
2166         struct hinic_5tuple_filter *filter;
2167         int ret;
2168
2169         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2170                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2171                 return -EINVAL;
2172         }
2173
2174         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2175         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2176         if (ret < 0)
2177                 return ret;
2178
2179         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2180                                          &filter_5tuple);
2181         if (filter != NULL && add) {
2182                 PMD_DRV_LOG(ERR, "Filter exists.");
2183                 return -EEXIST;
2184         }
2185         if (filter == NULL && !add) {
2186                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2187                 return -ENOENT;
2188         }
2189
2190         if (add) {
2191                 filter = rte_zmalloc("hinic_5tuple_filter",
2192                                 sizeof(struct hinic_5tuple_filter), 0);
2193                 if (filter == NULL)
2194                         return -ENOMEM;
2195                 rte_memcpy(&filter->filter_info, &filter_5tuple,
2196                                 sizeof(struct hinic_5tuple_filter_info));
2197                 filter->queue = ntuple_filter->queue;
2198
2199                 filter_info->qid = ntuple_filter->queue;
2200
2201                 ret = hinic_add_5tuple_filter(dev, filter);
2202                 if (ret)
2203                         rte_free(filter);
2204
2205                 return ret;
2206         }
2207
2208         hinic_remove_5tuple_filter(dev, filter);
2209
2210         return 0;
2211 }
2212
2213 static inline int
2214 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2215 {
2216         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2217                 return -EINVAL;
2218
2219         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2220                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2221                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2222                         " ethertype filter", filter->ether_type);
2223                 return -EINVAL;
2224         }
2225
2226         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2227                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2228                 return -EINVAL;
2229         }
2230         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2231                 PMD_DRV_LOG(ERR, "Drop option is not supported");
2232                 return -EINVAL;
2233         }
2234
2235         return 0;
2236 }
2237
2238 static inline int
2239 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2240                               struct hinic_pkt_filter *ethertype_filter)
2241 {
2242         switch (ethertype_filter->pkt_proto) {
2243         case RTE_ETHER_TYPE_SLOW:
2244                 filter_info->pkt_type = PKT_LACP_TYPE;
2245                 break;
2246
2247         case RTE_ETHER_TYPE_ARP:
2248                 filter_info->pkt_type = PKT_ARP_TYPE;
2249                 break;
2250
2251         default:
2252                 PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
2253                 return -EIO;
2254         }
2255
2256         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2257 }
2258
2259 static inline int
2260 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2261                               struct hinic_pkt_filter *ethertype_filter)
2262 {
2263         int id;
2264
2265         /* Find LACP or ARP type id */
2266         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2267         if (id < 0)
2268                 return -EINVAL;
2269
2270         if (!(filter_info->type_mask & (1 << id))) {
2271                 filter_info->type_mask |= 1 << id;
2272                 filter_info->pkt_filters[id].pkt_proto =
2273                         ethertype_filter->pkt_proto;
2274                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2275                 filter_info->qid = ethertype_filter->qid;
2276                 return id;
2277         }
2278
2279         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2280         return -EINVAL;
2281 }
2282
2283 static inline void
2284 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2285                               uint8_t idx)
2286 {
2287         if (idx >= HINIC_MAX_Q_FILTERS)
2288                 return;
2289
2290         filter_info->pkt_type = 0;
2291         filter_info->type_mask &= ~(1 << idx);
2292         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2293         filter_info->pkt_filters[idx].enable = FALSE;
2294         filter_info->pkt_filters[idx].qid = 0;
2295 }
2296
2297 static inline int
2298 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2299                                struct rte_eth_ethertype_filter *filter,
2300                                bool add)
2301 {
2302         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2303         struct hinic_filter_info *filter_info =
2304                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2305         struct hinic_pkt_filter ethertype_filter;
2306         int i;
2307         int ret_fw;
2308
2309         if (hinic_check_ethertype_filter(filter))
2310                 return -EINVAL;
2311
2312         if (add) {
2313                 ethertype_filter.pkt_proto = filter->ether_type;
2314                 ethertype_filter.enable = TRUE;
2315                 ethertype_filter.qid = (u8)filter->queue;
2316                 i = hinic_ethertype_filter_insert(filter_info,
2317                                                     &ethertype_filter);
2318                 if (i < 0)
2319                         return -ENOSPC;
2320
2321                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2322                                 filter_info->pkt_type, filter_info->qid,
2323                                 filter_info->pkt_filters[i].enable, true);
2324                 if (ret_fw) {
2325                         PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2326                                 filter_info->pkt_type, filter->queue,
2327                                 filter_info->pkt_filters[i].enable);
2328
2329                         hinic_ethertype_filter_remove(filter_info, i);
2330                         return -ENOENT;
2331                 }
2332                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2333                                 filter_info->pkt_type, filter->queue,
2334                                 filter_info->pkt_filters[i].enable);
2335
2336                 switch (ethertype_filter.pkt_proto) {
2337                 case RTE_ETHER_TYPE_SLOW:
2338                         ret_fw = hinic_set_lacp_tcam(nic_dev);
2339                         if (ret_fw) {
2340                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2341                                 hinic_ethertype_filter_remove(filter_info, i);
2342                                 return -ENOENT;
2343                         }
2344
2345                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2346                         break;
2347                 default:
2348                         break;
2349                 }
2350         } else {
2351                 ethertype_filter.pkt_proto = filter->ether_type;
2352                 i = hinic_ethertype_filter_lookup(filter_info,
2353                                                 &ethertype_filter);
2354                 /* Guard against a failed lookup returning a negative id */
2355                 if (i >= 0 && (filter_info->type_mask & (1 << i))) {
2356                         filter_info->pkt_filters[i].enable = FALSE;
2357                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
2358                                         filter_info->pkt_type,
2359                                         filter_info->pkt_filters[i].qid,
2360                                         filter_info->pkt_filters[i].enable,
2361                                         true);
2362
2363                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2364                                         filter_info->pkt_type,
2365                                         filter_info->pkt_filters[i].qid,
2366                                         filter_info->pkt_filters[i].enable);
2367
2368                         switch (ethertype_filter.pkt_proto) {
2369                         case RTE_ETHER_TYPE_SLOW:
2370                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2371                                                                 TCAM_PKT_LACP);
2372                                 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2373                                 break;
2374                         default:
2375                                 break;
2376                         }
2377
2378                         hinic_ethertype_filter_remove(filter_info, i);
2379
2380                 } else {
2381                         PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2382                                         filter_info->pkt_type, filter->queue,
2383                                         filter_info->pkt_filters[i].enable);
2384                         return -ENOENT;
2385                 }
2386         }
2387
2388         return 0;
2389 }
2390
2391 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2392                                 struct hinic_fdir_info *fdir_info)
2393 {
2394         switch (rule->mask.src_ipv4_mask) {
2395         case UINT32_MAX:
2396                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2397                 fdir_info->qid = rule->queue;
2398                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2399                 return 0;
2400
2401         case 0:
2402                 break;
2403
2404         default:
2405                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2406                 return -EINVAL;
2407         }
2408
2409         switch (rule->mask.dst_ipv4_mask) {
2410         case UINT32_MAX:
2411                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2412                 fdir_info->qid = rule->queue;
2413                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2414                 return 0;
2415
2416         case 0:
2417                 break;
2418
2419         default:
2420                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2421                 return -EINVAL;
2422         }
2423
2424         if (fdir_info->fdir_flag == 0) {
2425                 PMD_DRV_LOG(ERR, "All support mask is NULL.");
2426                 return -EINVAL;
2427         }
2428
2429         return 0;
2430 }
2431
2432 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2433                                         struct hinic_fdir_rule *rule, bool add)
2434 {
2435         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2436         struct hinic_fdir_info fdir_info;
2437         int ret;
2438
2439         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2440
2441         ret = hinic_fdir_info_init(rule, &fdir_info);
2442         if (ret) {
2443                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2444                 return ret;
2445         }
2446
2447         if (add) {
2448                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2449                                                 true, fdir_info.fdir_key,
2450                                                 true, fdir_info.fdir_flag);
2451                 if (ret) {
2452                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2453                                         fdir_info.fdir_flag, fdir_info.qid,
2454                                         fdir_info.fdir_key);
2455                         return -ENOENT;
2456                 }
2457                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2458                                 fdir_info.fdir_flag, fdir_info.qid,
2459                                 fdir_info.fdir_key);
2460         } else {
2461                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2462                                                 false, fdir_info.fdir_key, true,
2463                                                 fdir_info.fdir_flag);
2464                 if (ret) {
2465                         PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2466                                 fdir_info.fdir_flag, fdir_info.qid,
2467                                 fdir_info.fdir_key);
2468                         return -ENOENT;
2469                 }
2470                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2471                                 fdir_info.fdir_flag, fdir_info.qid,
2472                                 fdir_info.fdir_key);
2473         }
2474
2475         return 0;
2476 }
2477
2478 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2479 {
2480         u8 idx;
2481
2482         for (idx = 0; idx < len; idx++)
2483                 key_y[idx] = src_input[idx] & mask[idx];
2484 }
2485
2486 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2487 {
2488         u8 idx;
2489
2490         for (idx = 0; idx < len; idx++)
2491                 key_x[idx] = key_y[idx] ^ mask[idx];
2492 }
2493
2494 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2495                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2496 {
2497         tcam_translate_key_y(fdir_tcam_rule->key.y,
2498                 (u8 *)(&tcam_key->key_info),
2499                 (u8 *)(&tcam_key->key_mask),
2500                 TCAM_FLOW_KEY_SIZE);
2501         tcam_translate_key_x(fdir_tcam_rule->key.x,
2502                 fdir_tcam_rule->key.y,
2503                 (u8 *)(&tcam_key->key_mask),
2504                 TCAM_FLOW_KEY_SIZE);
2505 }
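/*
 * Worked example of the x/y TCAM key encoding implemented above
 * (illustrative only). Per byte: key_y = value & mask and
 * key_x = key_y ^ mask, so a mask bit of 1 gives x != y (the bit is
 * compared against the value) and a mask bit of 0 gives x == y == 0
 * (don't care):
 *
 *   value = 0xAB, mask = 0xF0
 *   key_y = 0xAB & 0xF0 = 0xA0
 *   key_x = 0xA0 ^ 0xF0 = 0x50
 *
 * Only the masked high nibble differs between x and y, so the entry
 * matches any byte whose high nibble is 0xA, regardless of the low nibble.
 */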
2506
2507 static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
2508                                      struct hinic_fdir_rule *rule,
2509                                      struct tag_tcam_key *tcam_key)
2510 {
2511         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2512
2513         switch (rule->mask.dst_ipv4_mask) {
2514         case UINT32_MAX:
2515                 tcam_key->key_info.ext_dip_h =
2516                         (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2517                 tcam_key->key_info.ext_dip_l =
2518                         rule->hinic_fdir.dst_ip & 0xffffU;
2519                 tcam_key->key_mask.ext_dip_h =
2520                         (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2521                 tcam_key->key_mask.ext_dip_l =
2522                         rule->mask.dst_ipv4_mask & 0xffffU;
2523                 break;
2524
2525         case 0:
2526                 break;
2527
2528         default:
2529                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2530                 return -EINVAL;
2531         }
2532
2533         if (rule->mask.dst_port_mask > 0) {
2534                 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2535                 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2536         }
2537
2538         if (rule->mask.src_port_mask > 0) {
2539                 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2540                 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2541         }
2542
2543         switch (rule->mask.tunnel_flag) {
2544         case UINT16_MAX:
2545                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2546                 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2547                 break;
2548
2549         case 0:
2550                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2551                 tcam_key->key_mask.tunnel_flag = 0;
2552                 break;
2553
2554         default:
2555                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2556                 return -EINVAL;
2557         }
2558
2559         if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2560                 tcam_key->key_info.dst_port =
2561                                         rule->hinic_fdir.tunnel_inner_dst_port;
2562                 tcam_key->key_mask.dst_port =
2563                                         rule->mask.tunnel_inner_dst_port_mask;
2564         }
2565
2566         if (rule->mask.tunnel_inner_src_port_mask > 0) {
2567                 tcam_key->key_info.src_port =
2568                                         rule->hinic_fdir.tunnel_inner_src_port;
2569                 tcam_key->key_mask.src_port =
2570                                         rule->mask.tunnel_inner_src_port_mask;
2571         }
2572
2573         switch (rule->mask.proto_mask) {
2574         case UINT16_MAX:
2575                 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2576                 tcam_key->key_mask.protocol = UINT8_MAX;
2577                 break;
2578
2579         case 0:
2580                 break;
2581
2582         default:
2583                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2584                 return -EINVAL;
2585         }
2586
2587         tcam_key->key_mask.function_id = UINT16_MAX;
2588         tcam_key->key_info.function_id =
2589                 hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
2590
2591         return 0;
2592 }
2593
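/*
 * Fill the TCAM key/mask for an IPv6 rule: the 16 destination address
 * bytes are packed big-endian into eight 16-bit key words, and the
 * entry is tagged with ipv6_flag so it cannot alias an IPv4 key.
 */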
static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
                                     struct hinic_fdir_rule *rule,
                                     struct tag_tcam_key *tcam_key)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        switch (rule->mask.dst_ipv6_mask) {
        case UINT16_MAX:
                tcam_key->key_info_ipv6.ipv6_key0 =
                        ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[1];
                tcam_key->key_info_ipv6.ipv6_key1 =
                        ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[3];
                tcam_key->key_info_ipv6.ipv6_key2 =
                        ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[5];
                tcam_key->key_info_ipv6.ipv6_key3 =
                        ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[7];
                tcam_key->key_info_ipv6.ipv6_key4 =
                        ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[9];
                tcam_key->key_info_ipv6.ipv6_key5 =
                        ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[11];
                tcam_key->key_info_ipv6.ipv6_key6 =
                        ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[13];
                tcam_key->key_info_ipv6.ipv6_key7 =
                        ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
                        rule->hinic_fdir.dst_ipv6[15];
                tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
                tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
                break;

        case 0:
                break;

        default:
                PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
                return -EINVAL;
        }

        if (rule->mask.dst_port_mask > 0) {
                tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
                tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
        }

        switch (rule->mask.proto_mask) {
        case UINT16_MAX:
                tcam_key->key_info_ipv6.protocol =
                        (rule->hinic_fdir.proto) & 0x7F;
                tcam_key->key_mask_ipv6.protocol = 0x7F;
                break;

        case 0:
                break;

        default:
                PMD_DRV_LOG(ERR, "invalid protocol mask");
                return -EINVAL;
        }

        tcam_key->key_info_ipv6.ipv6_flag = 1;
        tcam_key->key_mask_ipv6.ipv6_flag = 1;

        tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
        tcam_key->key_info_ipv6.function_id =
                        (u8)hinic_global_func_id(nic_dev->hwdev);

        return 0;
}

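/*
 * Build the TCAM key for a rule and bind its action: pick the IPv4 or
 * IPv6 layout based on which destination mask is fully set, record the
 * destination queue and derive the final x/y key pair.
 */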
static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
                                     struct hinic_fdir_rule *rule,
                                     struct tag_tcam_key *tcam_key,
                                     struct tag_tcam_cfg_rule *fdir_tcam_rule)
{
        int ret = -EINVAL;

        if (rule->mask.dst_ipv4_mask == UINT32_MAX)
                ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
        else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
                ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);

        if (ret < 0)
                return ret;

        fdir_tcam_rule->data.qid = rule->queue;

        tcam_key_calculate(tcam_key, fdir_tcam_rule);

        return 0;
}

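/* Linear search of the software filter list for an exact key match. */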
static inline struct hinic_tcam_filter *
hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
                        struct tag_tcam_key *key)
{
        struct hinic_tcam_filter *it;

        TAILQ_FOREACH(it, filter_list, entries) {
                if (memcmp(key, &it->tcam_key,
                        sizeof(struct tag_tcam_key)) == 0) {
                        return it;
                }
        }

        return NULL;
}

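/*
 * Find the first free slot in the per-function TCAM index array; PF and
 * VF functions have different slot capacities.
 */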
static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
                                        struct hinic_tcam_info *tcam_info,
                                        struct hinic_tcam_filter *tcam_filter,
                                        u16 *tcam_index)
{
        int index;
        int max_index;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
                max_index = HINIC_VF_MAX_TCAM_FILTERS;
        else
                max_index = HINIC_PF_MAX_TCAM_FILTERS;

        for (index = 0; index < max_index; index++) {
                if (tcam_info->tcam_index_array[index] == 0)
                        break;
        }

        if (index == max_index) {
                PMD_DRV_LOG(ERR, "function 0x%x supports at most %d tcam filter rules",
                        hinic_global_func_id(nic_dev->hwdev), max_index);
                return -EINVAL;
        }

        tcam_filter->index = index;
        *tcam_index = index;

        return 0;
}

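/*
 * Install one TCAM rule in hardware. The first rule of a function also
 * allocates a TCAM block and turns the fdir and TCAM filter engines on;
 * later rules reuse that block. The hardware rule index is the block
 * base plus the free slot found above.
 */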
static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
                                struct hinic_tcam_filter *tcam_filter,
                                struct tag_tcam_cfg_rule *fdir_tcam_rule)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        u16 index = 0;
        u16 tcam_block_index = 0;
        int rc;

        if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
                return -EINVAL;

        if (tcam_info->tcam_rule_nums == 0) {
                if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                        rc = hinic_alloc_tcam_block(nic_dev->hwdev,
                                HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
                        if (rc != 0) {
                                PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
                                return -EFAULT;
                        }
                } else {
                        rc = hinic_alloc_tcam_block(nic_dev->hwdev,
                                HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
                        if (rc != 0) {
                                PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
                                return -EFAULT;
                        }
                }

                tcam_info->tcam_block_index = tcam_block_index;
        } else {
                tcam_block_index = tcam_info->tcam_block_index;
        }

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                fdir_tcam_rule->index =
                        HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
        } else {
                fdir_tcam_rule->index =
                        tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
        }

        rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
        if (rc != 0) {
                PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
                return -EFAULT;
        }

        PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x, "
                "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeeded",
                hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
                fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
                tcam_info->tcam_rule_nums + 1);

        if (tcam_info->tcam_rule_nums == 0) {
                rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
                if (rc < 0) {
                        (void)hinic_del_tcam_rule(nic_dev->hwdev,
                                                fdir_tcam_rule->index);
                        return rc;
                }

                rc = hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true);
                if (rc && rc != HINIC_MGMT_CMD_UNSUPPORTED) {
                        (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0,
                                                false);
                        (void)hinic_del_tcam_rule(nic_dev->hwdev,
                                                fdir_tcam_rule->index);
                        return rc;
                }
        }

        TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);

        tcam_info->tcam_index_array[index] = 1;
        tcam_info->tcam_rule_nums++;

        return 0;
}

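/*
 * Remove one TCAM rule from hardware and drop its software bookkeeping;
 * the TCAM block is handed back once the last rule of the function is
 * gone.
 */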
static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
                                struct hinic_tcam_filter *tcam_filter)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        u32 index = 0;
        u16 tcam_block_index = tcam_info->tcam_block_index;
        int rc;
        u8 block_type = 0;

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
                        tcam_filter->index;
                block_type = HINIC_TCAM_BLOCK_TYPE_VF;
        } else {
                index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
                        tcam_filter->index;
                block_type = HINIC_TCAM_BLOCK_TYPE_PF;
        }

        rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
        if (rc != 0) {
                PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
                return -EFAULT;
        }

        PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
                "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeeded",
                hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
                tcam_info->tcam_rule_nums - 1);

        TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);

        tcam_info->tcam_index_array[tcam_filter->index] = 0;

        rte_free(tcam_filter);

        tcam_info->tcam_rule_nums--;

        if (tcam_info->tcam_rule_nums == 0) {
                (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
                                        &tcam_block_index);
        }

        return 0;
}

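/*
 * Add or remove a TCAM-mode fdir rule: build the key, look it up in the
 * software list to reject duplicate adds and unknown deletes, then
 * program or clear the hardware entry.
 */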
static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
                                        struct hinic_fdir_rule *rule, bool add)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_tcam_filter *tcam_filter;
        struct tag_tcam_cfg_rule fdir_tcam_rule;
        struct tag_tcam_key tcam_key;
        int ret;

        memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
        memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));

        ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
        if (ret) {
                PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
                return ret;
        }

        tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
                                                &tcam_key);
        if (tcam_filter != NULL && add) {
                PMD_DRV_LOG(ERR, "Filter exists.");
                return -EEXIST;
        }
        if (tcam_filter == NULL && !add) {
                PMD_DRV_LOG(ERR, "Filter doesn't exist.");
                return -ENOENT;
        }

        if (add) {
                tcam_filter = rte_zmalloc("hinic_5tuple_filter",
                                sizeof(struct hinic_tcam_filter), 0);
                if (tcam_filter == NULL)
                        return -ENOMEM;
                (void)rte_memcpy(&tcam_filter->tcam_key,
                                 &tcam_key, sizeof(struct tag_tcam_key));
                tcam_filter->queue = fdir_tcam_rule.data.qid;

                ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
                if (ret < 0) {
                        rte_free(tcam_filter);
                        return ret;
                }

                rule->tcam_index = fdir_tcam_rule.index;

        } else {
                PMD_DRV_LOG(INFO, "Begin to delete tcam filter");
                ret = hinic_del_tcam_filter(dev, tcam_filter);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We let it use the first filter it hits.
 * So, the sequence matters.
 */
static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error)
{
        int ret;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct hinic_fdir_rule fdir_rule;
        struct rte_flow *flow = NULL;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
                return NULL;
        }

        hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
                        sizeof(struct hinic_flow_mem), 0);
        if (!hinic_flow_mem_ptr) {
                PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
                rte_free(flow);
                return NULL;
        }

        hinic_flow_mem_ptr->flow = flow;
        TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
                                entries);

        /* Add ntuple filter */
        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = hinic_parse_ntuple_filter(dev, attr, pattern,
                        actions, &ntuple_filter, error);
        if (!ret) {
                ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
                if (!ret) {
                        ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
                                sizeof(struct hinic_ntuple_filter_ele), 0);
                        if (ntuple_filter_ptr == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to allocate ntuple filter element");
                                (void)hinic_add_del_ntuple_filter(dev,
                                        &ntuple_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ntuple_filter_ptr->filter_info,
                                   &ntuple_filter,
                                   sizeof(struct rte_eth_ntuple_filter));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
                                ntuple_filter_ptr, entries);
                        flow->rule = ntuple_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_NTUPLE;

                        PMD_DRV_LOG(INFO, "Create flow ntuple succeeded, func_id: 0x%x",
                                hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

        /* Add ethertype filter */
        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
                                        &ethertype_filter, error);
        if (!ret) {
                ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
                                                     TRUE);
                if (!ret) {
                        ethertype_filter_ptr =
                                rte_zmalloc("hinic_ethertype_filter",
                                sizeof(struct hinic_ethertype_filter_ele), 0);
                        if (ethertype_filter_ptr == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to allocate ethertype filter element");
                                (void)hinic_add_del_ethertype_filter(dev,
                                        &ethertype_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ethertype_filter_ptr->filter_info,
                                &ethertype_filter,
                                sizeof(struct rte_eth_ethertype_filter));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        flow->rule = ethertype_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;

                        PMD_DRV_LOG(INFO, "Create flow ethertype succeeded, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

        /* Add fdir filter */
        memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
        ret = hinic_parse_fdir_filter(dev, attr, pattern,
                                      actions, &fdir_rule, error);
        if (!ret) {
                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
                        ret = hinic_add_del_fdir_filter(dev,
                                        &fdir_rule, TRUE);
                } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
                        ret = hinic_add_del_tcam_fdir_filter(dev,
                                        &fdir_rule, TRUE);
                } else {
                        PMD_DRV_LOG(ERR, "Flow fdir rule create failed: wrong rule mode");
                        ret = -EINVAL;
                        goto out;
                }
                if (!ret) {
                        fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
                                sizeof(struct hinic_fdir_rule_ele), 0);
                        if (fdir_rule_ptr == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to allocate fdir rule element");
                                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL)
                                        (void)hinic_add_del_fdir_filter(dev,
                                                &fdir_rule, FALSE);
                                else
                                        (void)hinic_add_del_tcam_fdir_filter(dev,
                                                &fdir_rule, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
                                sizeof(struct hinic_fdir_rule));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
                                fdir_rule_ptr, entries);
                        flow->rule = fdir_rule_ptr;
                        flow->filter_type = RTE_ETH_FILTER_FDIR;

                        PMD_DRV_LOG(INFO, "Create flow fdir rule succeeded, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

out:
        TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(hinic_flow_mem_ptr);
        rte_free(flow);
        return NULL;
}

/* Destroy a flow rule on hinic. */
static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                                struct rte_flow_error *error)
{
        int ret;
        struct rte_flow *pmd_flow = flow;
        enum rte_filter_type filter_type = pmd_flow->filter_type;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct hinic_fdir_rule fdir_rule;
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
                ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
                                        pmd_flow->rule;
                rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
                        sizeof(struct rte_eth_ntuple_filter));
                ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
                                ntuple_filter_ptr, entries);
                        rte_free(ntuple_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_ETHERTYPE:
                ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
                                        pmd_flow->rule;
                rte_memcpy(&ethertype_filter,
                        &ethertype_filter_ptr->filter_info,
                        sizeof(struct rte_eth_ethertype_filter));
                ret = hinic_add_del_ethertype_filter(dev,
                                &ethertype_filter, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        rte_free(ethertype_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_FDIR:
                fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
                rte_memcpy(&fdir_rule,
                        &fdir_rule_ptr->filter_info,
                        sizeof(struct hinic_fdir_rule));
                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
                        ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
                } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
                        ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
                                                                FALSE);
                } else {
                        PMD_DRV_LOG(ERR, "FDIR filter type is wrong!");
                        ret = -EINVAL;
                }
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
                                fdir_rule_ptr, entries);
                        rte_free(fdir_rule_ptr);
                }
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
                        filter_type);
                ret = -EINVAL;
                break;
        }

        if (ret) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failed to destroy flow");
                return ret;
        }

        TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
                if (hinic_flow_mem_ptr->flow == pmd_flow) {
                        TAILQ_REMOVE(&nic_dev->hinic_flow_list,
                                hinic_flow_mem_ptr, entries);
                        rte_free(hinic_flow_mem_ptr);
                        break;
                }
        }
        rte_free(flow);

        PMD_DRV_LOG(INFO, "Destroy flow succeeded, func_id: 0x%x",
                        hinic_global_func_id(nic_dev->hwdev));

        return ret;
}

/* Remove all the n-tuple filters */
static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
        struct hinic_filter_info *filter_info =
                HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct hinic_5tuple_filter *p_5tuple;

        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
                hinic_remove_5tuple_filter(dev, p_5tuple);
}

/* Remove all the ether type filters */
static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct hinic_filter_info *filter_info =
                HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
        int ret = 0;

        if (filter_info->type_mask &
                (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
                hinic_ethertype_filter_remove(filter_info,
                        HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
                ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
                                        filter_info->qid, false, true);

                (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
        }

        if (filter_info->type_mask &
                (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
                hinic_ethertype_filter_remove(filter_info,
                        HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
                ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
                        filter_info->qid, false, true);
        }

        if (ret)
                PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
                                filter_info->pkt_type);
}

/* Remove all the flow director filters */
static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_tcam_filter *tcam_filter_ptr;

        while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
                (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);

        (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);

        (void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);

        (void)hinic_flush_tcam_rule(nic_dev->hwdev);
}

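/*
 * Release every software list element and flow handle; the hardware
 * state has already been cleared by the helpers above.
 */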
static void hinic_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        while ((ntuple_filter_ptr =
                        TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
                TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
                                entries);
                rte_free(ntuple_filter_ptr);
        }

        while ((ethertype_filter_ptr =
                        TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
                TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr,
                                entries);
                rte_free(ethertype_filter_ptr);
        }

        while ((fdir_rule_ptr =
                        TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
                TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
                                entries);
                rte_free(fdir_rule_ptr);
        }

        while ((hinic_flow_mem_ptr =
                        TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
                TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
                                entries);
                rte_free(hinic_flow_mem_ptr->flow);
                rte_free(hinic_flow_mem_ptr);
        }
}

/* Destroy all flow rules associated with a port on hinic. */
static int hinic_flow_flush(struct rte_eth_dev *dev,
                                __rte_unused struct rte_flow_error *error)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        hinic_clear_all_ntuple_filter(dev);
        hinic_clear_all_ethertype_filter(dev);
        hinic_clear_all_fdir_filter(dev);
        hinic_filterlist_flush(dev);

        PMD_DRV_LOG(INFO, "Flush flow succeeded, func_id: 0x%x",
                        hinic_global_func_id(nic_dev->hwdev));
        return 0;
}

void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
{
        hinic_clear_all_ntuple_filter(dev);
        hinic_clear_all_ethertype_filter(dev);
        hinic_clear_all_fdir_filter(dev);
        hinic_filterlist_flush(dev);
}

const struct rte_flow_ops hinic_flow_ops = {
        .validate = hinic_flow_validate,
        .create = hinic_flow_create,
        .destroy = hinic_flow_destroy,
        .flush = hinic_flow_flush,
};

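/*
 * Illustrative application-side usage of these callbacks through the
 * generic rte_flow API (a sketch, not part of the driver; port_id and
 * the queue index are placeholders):
 *
 *      struct rte_flow_error err;
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * rte_flow_create() lands in hinic_flow_create() above, which tries the
 * ntuple, ethertype and fdir parsers in that order.
 */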