net/hinic: add flow director filter
drivers/net/hinic/hinic_pmd_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6
#define IP_HEADER_PROTOCOL_TYPE_UDP     17
#define IP_HEADER_PROTOCOL_TYPE_ICMP    1

#define FDIR_TCAM_NORMAL_PACKET         0
#define FDIR_TCAM_TUNNEL_PACKET         1

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet types defined in hardware to perform filtering */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->tcam)

enum hinic_atr_flow_type {
        HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
        HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
        HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
        HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
};

/* Structure to store fdir's info. */
struct hinic_fdir_info {
        uint8_t fdir_flag;
        uint8_t qid;
        uint32_t fdir_key;
};

/**
 * An endless loop can never happen under the assumptions below:
 * 1. there is at least one non-void item (END) in the pattern
 * 2. cur is before that END item.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}
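
/*
 * Usage sketch (illustrative, not part of the upstream driver): walk a
 * pattern with next_no_void_pattern(), relying on the assumptions above
 * (every pattern carries a terminating END item). VOID items are
 * skipped, so [VOID, ETH, VOID, IPV4, END] is visited as ETH, IPV4, END.
 * The function name below is hypothetical.
 */
static __rte_unused int
example_count_no_void_items(const struct rte_flow_item pattern[])
{
        const struct rte_flow_item *item = NULL;
        int cnt = 0;

        do {
                item = next_no_void_pattern(pattern, item);
                cnt++;
        } while (item->type != RTE_FLOW_ITEM_TYPE_END);

        return cnt; /* count includes the END item */
}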

static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only ingress is supported.");
                return -rte_errno;
        }

        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Egress is not supported.");
                return -rte_errno;
        }

        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Priority is not supported.");
                return -rte_errno;
        }

        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
                                const struct rte_flow_item *pattern,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
                                        struct rte_flow_error *error)
{
        /* The first non-void item should be MAC */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }
        return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
                        const struct rte_flow_action *act,
                        const struct rte_flow_action_queue *act_q,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        /* Parse action */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
                act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule,
 * and fill in the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE or DROP.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item *pattern,
                        const struct rte_flow_action *actions,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act = NULL;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        item = next_no_void_pattern(pattern, NULL);
        if (hinic_check_ethertype_first_item(item, error))
                return -rte_errno;

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /*
         * Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /*
         * If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
                return -rte_errno;

        if (hinic_check_ethertype_attr_ele(attr, error))
                return -rte_errno;

        return 0;
}

static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
                return -rte_errno;

        /* NIC doesn't support filtering on MAC address. */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Drop option is unsupported");
                return -rte_errno;
        }

        /* Hinic only supports LACP/ARP ether types */
        if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
                filter->ether_type != RTE_ETHER_TYPE_ARP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "only LACP/ARP types supported by ethertype filter");
                return -rte_errno;
        }

        return 0;
}
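
/*
 * Illustrative application-side sketch (assumed usage, not part of the
 * upstream driver): an ethertype rule this parser accepts -- match ARP
 * frames (ether type 0x0806) and steer them to Rx queue 1. MAC masks
 * stay zeroed because MAC matching is rejected above, and only QUEUE
 * actions survive the checks. The function name is hypothetical.
 */
static __rte_unused int
example_create_arp_rule(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth spec = {
                .type = RTE_BE16(RTE_ETHER_TYPE_ARP),
        };
        struct rte_flow_item_eth mask = {
                .type = RTE_BE16(0xFFFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &spec, .mask = &mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* rte_flow_create() returns NULL and fills err on failure */
        return rte_flow_create(port_id, &attr, pattern, actions, err) ?
                0 : -rte_errno;
}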

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -rte_errno;
        }

        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* ICMP all should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware there is an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}

static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support tcp flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic only supports priorities 1-7 */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index too big");
                return -rte_errno;
        }

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}
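
/*
 * Illustrative application-side sketch (assumed usage, not part of the
 * upstream driver): a 5-tuple rule this parser accepts -- TCP packets
 * from 192.168.1.20 to 192.167.3.50 with destination port 80, steered
 * to Rx queue 2 at an n-tuple priority inside [1, 7]. The tcp_flags
 * mask stays zero because hinic rejects RTE_NTUPLE_FLAGS_TCP_FLAG.
 * The function name is hypothetical.
 */
static __rte_unused int
example_create_ntuple_rule(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = RTE_BE32(0xC0A80114), /* 192.168.1.20 */
                        .dst_addr = RTE_BE32(0xC0A70332), /* 192.167.3.50 */
                        .next_proto_id = 6, /* IPPROTO_TCP */
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(UINT32_MAX),
                        .dst_addr = RTE_BE32(UINT32_MAX),
                        .next_proto_id = UINT8_MAX,
                },
        };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr = { .dst_port = RTE_BE16(80) },
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr = { .dst_port = RTE_BE16(0xFFFF) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err) ?
                0 : -rte_errno;
}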

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 or TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter, support mac, ipv4, tcp, udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support mac, ipv4");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src, dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
                rule->mode = HINIC_FDIR_MODE_NORMAL;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP, UDP, ICMP, ANY or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, icmp, any, end");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                        __rte_unused const struct rte_flow_item pattern[],
                        __rte_unused struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by normal fdir filter, l4 is not supported");
                return -rte_errno;
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int
hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir normal tcam filter");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
                rule->mask.src_port_mask = tcp_mask->hdr.src_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                if (item->spec) {
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
                        rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                /*
                 * Only care about src & dst ports,
                 * others should be masked.
                 */
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                udp_mask = (const struct rte_flow_item_udp *)item->mask;
                if (udp_mask->hdr.dgram_len ||
                        udp_mask->hdr.dgram_cksum) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support udp");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.src_port_mask = udp_mask->hdr.src_port;
                rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
                if (item->spec) {
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
                        rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
                }
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam normal, l4 only supports icmp, any, tcp, udp");
                return -rte_errno;
        }

        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter tcam normal, support end");
                return -rte_errno;
        }

        /* pass the END item back to the caller */
        *in_out_item = item;

        return 0;
}

static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}
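
/*
 * Illustrative application-side sketch (assumed usage, not part of the
 * upstream driver): a rule that lands in TCAM normal mode -- IPv4 ICMP
 * packets steered to Rx queue 0. The ICMP item carries no spec/mask;
 * matching is done on the IP protocol number. The function name is
 * hypothetical.
 */
static __rte_unused int
example_create_tcam_icmp_rule(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_mask;
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_ICMP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* all-zero IPv4 mask: no IP field is matched, but the item is
         * required so the parser can walk from IPv4 to the L4 item
         */
        memset(&ip_mask, 0, sizeof(ip_mask));
        return rte_flow_create(port_id, &attr, pattern, actions, err) ?
                0 : -rte_errno;
}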

static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support vxlan");
                        return -rte_errno;
                }

                *in_out_item = item;
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam tunnel, outer l4 only supports udp");
                return -rte_errno;
        }

        return 0;
}

static int
hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                *in_out_item = item;
        }

        return 0;
}

static int
hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Not supported last point for range */
                if (item->last) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                /* get the TCP/UDP info */
                if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.tcp_flags ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                (void)memset(rule, 0,
                                        sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support tcp");
                                return -rte_errno;
                        }

                        rule->mode = HINIC_FDIR_MODE_TCAM;
                        rule->mask.tunnel_flag = UINT16_MAX;
                        rule->mask.tunnel_inner_src_port_mask =
                                                        tcp_mask->hdr.src_port;
                        rule->mask.tunnel_inner_dst_port_mask =
                                                        tcp_mask->hdr.dst_port;
                        rule->mask.proto_mask = UINT16_MAX;

                        rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                        if (item->spec) {
                                tcp_spec =
                                (const struct rte_flow_item_tcp *)item->spec;
                                rule->hinic_fdir.tunnel_inner_src_port =
                                                        tcp_spec->hdr.src_port;
                                rule->hinic_fdir.tunnel_inner_dst_port =
                                                        tcp_spec->hdr.dst_port;
                        }
                } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support udp");
                                return -rte_errno;
                        }

                        rule->mode = HINIC_FDIR_MODE_TCAM;
                        rule->mask.tunnel_flag = UINT16_MAX;
                        rule->mask.tunnel_inner_src_port_mask =
                                                        udp_mask->hdr.src_port;
                        rule->mask.tunnel_inner_dst_port_mask =
                                                        udp_mask->hdr.dst_port;
                        rule->mask.proto_mask = UINT16_MAX;

                        rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
                        if (item->spec) {
                                udp_spec =
                                (const struct rte_flow_item_udp *)item->spec;
                                rule->hinic_fdir.tunnel_inner_src_port =
                                                        udp_spec->hdr.src_port;
                                rule->hinic_fdir.tunnel_inner_dst_port =
                                                        udp_spec->hdr.dst_port;
                        }
                } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
                        rule->mode = HINIC_FDIR_MODE_TCAM;
                        rule->mask.tunnel_flag = UINT16_MAX;
                } else {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                /* get next no void item */
                *in_out_item = next_no_void_pattern(pattern, item);
        }

        return 0;
}
1288
1289 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1290                                         const struct rte_flow_item pattern[],
1291                                         struct hinic_fdir_rule *rule,
1292                                         struct rte_flow_error *error)
1293 {
1294         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1295                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1296                 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1297                 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1298                 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1299                 hinic_normal_item_check_end(item, rule, error))
1300                 return -rte_errno;
1301
1302         return 0;
1303 }
1304
1305 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1306                                         struct hinic_fdir_rule *rule,
1307                                         struct rte_flow_error *error)
1308 {
1309         /* Must be input direction */
1310         if (!attr->ingress) {
1311                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1312                 rte_flow_error_set(error, EINVAL,
1313                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1314                                    attr, "Only ingress is supported.");
1315                 return -rte_errno;
1316         }
1317
1318         /* Not supported */
1319         if (attr->egress) {
1320                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1321                 rte_flow_error_set(error, EINVAL,
1322                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1323                                    attr, "Egress is not supported.");
1324                 return -rte_errno;
1325         }
1326
1327         /* Not supported */
1328         if (attr->priority) {
1329                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1330                 rte_flow_error_set(error, EINVAL,
1331                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1332                         attr, "Priority is not supported.");
1333                 return -rte_errno;
1334         }
1335
1336         return 0;
1337 }
1338
1339 static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
1340                                 const struct rte_flow_action actions[],
1341                                 struct hinic_fdir_rule *rule,
1342                                 struct rte_flow_error *error)
1343 {
1344         const struct rte_flow_action *act;
1345
1346         /* Check if the first not void action is QUEUE */
1347         act = next_no_void_action(actions, NULL);
1348         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1349                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1350                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1351                         item, "Not supported action.");
1352                 return -rte_errno;
1353         }
1354
1355         rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1356
1357         /* Check if the next not void item is END */
1358         act = next_no_void_action(actions, act);
1359         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1360                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1361                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1362                         act, "Not supported action.");
1363                 return -rte_errno;
1364         }
1365
1366         return 0;
1367 }
1368
1369 /**
1370  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1371  * and get the flow director filter info as well.
1372  * UDP/TCP/SCTP PATTERN:
1373  * The first not void item can be ETH or IPV4 or IPV6
1374  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1375  * The next not void item could be UDP or TCP (optional)
1376  * The next not void item must be END.
1377  * ACTION:
1378  * The first not void action should be QUEUE.
1379  * The second not void optional action should be MARK,
1380  * mark_id is a uint32_t number.
1381  * The next not void action should be END.
1382  * UDP/TCP pattern example:
1383  * ITEM           Spec                       Mask
1384  * ETH            NULL                       NULL
1385  * IPV4           src_addr  1.2.3.6          0xFFFFFFFF
1386  *                dst_addr  1.2.3.5          0xFFFFFFFF
1387  * UDP/TCP        src_port  80               0xFFFF
1388  *                dst_port  80               0xFFFF
1389  * END
1390  * Other members in mask and spec should be set to 0x00.
1391  * Item->last should be NULL.
1392  */
1393 static int
1394 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1395                                const struct rte_flow_item pattern[],
1396                                const struct rte_flow_action actions[],
1397                                struct hinic_fdir_rule *rule,
1398                                struct rte_flow_error *error)
1399 {
1400         const struct rte_flow_item *item = NULL;
1401
1402         if (hinic_check_filter_arg(attr, pattern, actions, error))
1403                 return -rte_errno;
1404
1405         if (hinic_check_normal_item_ele(item, pattern, rule, error))
1406                 return -rte_errno;
1407
1408         if (hinic_check_normal_attr_ele(attr, rule, error))
1409                 return -rte_errno;
1410
1411         if (hinic_check_normal_act_ele(item, actions, rule, error))
1412                 return -rte_errno;
1413
1414         return 0;
1415 }
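
/*
 * Illustrative usage sketch (editor's example, not part of the driver):
 * an application could create a flow matching the pattern documented
 * above roughly as follows. Port id, queue index and addresses are
 * placeholder values; error handling is omitted.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 6)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 5)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = RTE_BE32(UINT32_MAX),
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = RTE_BE16(80),
 *		.hdr.dst_port = RTE_BE16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = RTE_BE16(UINT16_MAX),
 *		.hdr.dst_port = RTE_BE16(UINT16_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */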
1416
1417 /**
1418  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1419  * and get the flow director filter info as well.
1420  * UDP/TCP/SCTP PATTERN:
1421  * The first not void item can be ETH or IPV4 or IPV6
1422  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1423  * The next not void item can be ANY/TCP/UDP
1424  * ACTION:
1425  * The first not void action should be QUEUE.
1426  * The second not void optional action should be MARK,
1427  * mark_id is a uint32_t number.
1428  * The next not void action should be END.
1429  * UDP/TCP pattern example:
1430  * ITEM           Spec                       Mask
1431  * ETH            NULL                       NULL
1432  * IPV4           src_addr  1.2.3.6          0xFFFFFFFF
1433  *                dst_addr  1.2.3.5          0xFFFFFFFF
1434  * UDP/TCP        src_port  80               0xFFFF
1435  *                dst_port  80               0xFFFF
1436  * END
1437  * Other members in mask and spec should be set to 0x00.
1438  * Item->last should be NULL.
1439  */
1440 static int
1441 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1442                                const struct rte_flow_item pattern[],
1443                                const struct rte_flow_action actions[],
1444                                struct hinic_fdir_rule *rule,
1445                                struct rte_flow_error *error)
1446 {
1447         const struct rte_flow_item *item = NULL;
1448
1449         if (hinic_check_filter_arg(attr, pattern, actions, error))
1450                 return -rte_errno;
1451
1452         if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1453                 return -rte_errno;
1454
1455         if (hinic_check_normal_attr_ele(attr, rule, error))
1456                 return -rte_errno;
1457
1458         if (hinic_check_normal_act_ele(item, actions, rule, error))
1459                 return -rte_errno;
1460
1461         return 0;
1462 }
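
/*
 * Illustrative sketch (editor's example, not part of the driver): per the
 * pattern description above, the L4 item may also be ANY, e.g. with
 * ip_spec/ip_mask set up as in the previous example:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ANY },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */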
1463
1464 /**
1465  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1466  * and get the flow director filter info as well.
1467  * UDP/TCP/SCTP PATTERN:
1468  * The first not void item can be ETH or IPV4 or IPV6
1469  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1470  * The next not void item must be UDP
1471  * The next not void item must be VXLAN (optional)
1472  * The next not void inner item can be ETH or IPV4 or IPV6
1473  * The next not void item could be ANY, UDP or TCP (optional)
1474  * The next not void item must be END.
1475  * ACTION:
1476  * The first not void action should be QUEUE.
1477  * The second not void optional action should be MARK,
1478  * mark_id is a uint32_t number.
1479  * The next not void action should be END.
1480  * UDP/TCP pattern example:
1481  * ITEM           Spec                       Mask
1482  * ETH            NULL                       NULL
1483  * IPV4           src_addr  1.2.3.6          0xFFFFFFFF
1484  *                dst_addr  1.2.3.5          0xFFFFFFFF
1485  * UDP            NULL                       NULL
1486  * VXLAN          NULL                       NULL
1487  * UDP/TCP        src_port  80               0xFFFF
1488  *                dst_port  80               0xFFFF
1489  * END
1490  * Other members in mask and spec should be set to 0x00.
1491  * Item->last should be NULL.
1492  */
1493 static int
1494 hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
1495                                const struct rte_flow_item pattern[],
1496                                const struct rte_flow_action actions[],
1497                                struct hinic_fdir_rule *rule,
1498                                struct rte_flow_error *error)
1499 {
1500         const struct rte_flow_item *item = NULL;
1501
1502         if (hinic_check_filter_arg(attr, pattern, actions, error))
1503                 return -rte_errno;
1504
1505         if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1506                 return -rte_errno;
1507
1508         if (hinic_check_normal_attr_ele(attr, rule, error))
1509                 return -rte_errno;
1510
1511         if (hinic_check_normal_act_ele(item, actions, rule, error))
1512                 return -rte_errno;
1513
1514         return 0;
1515 }
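
/*
 * Illustrative sketch (editor's example, not part of the driver): a VXLAN
 * tunnel rule matching the pattern documented above can match on the
 * inner L4 ports, e.g. with spec/mask variables set up as in the earlier
 * example:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */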
1516
1517 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1518                         const struct rte_flow_attr *attr,
1519                         const struct rte_flow_item pattern[],
1520                         const struct rte_flow_action actions[],
1521                         struct hinic_fdir_rule *rule,
1522                         struct rte_flow_error *error)
1523 {
1524         int ret;
1525
1526         ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1527                                                 rule, error);
1528         if (!ret)
1529                 goto step_next;
1530
1531         ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1532                                                 rule, error);
1533         if (!ret)
1534                 goto step_next;
1535
1536         ret = hinic_parse_fdir_filter_tcam_tunnel(attr, pattern, actions,
1537                                                 rule, error);
1538         if (ret)
1539                 return ret;
1540
1541 step_next:
1542         if (rule->queue >= dev->data->nb_rx_queues)
1543                 return -ENOTSUP;
1544
1545         return ret;
1546 }
1547
1548 /**
1549  * Check if the flow rule is supported by the NIC.
1550  * It only checks the format; it does not guarantee that the rule can be
1551  * programmed into the HW, because there may not be enough room for it.
1552  */
1553 static int hinic_flow_validate(struct rte_eth_dev *dev,
1554                                 const struct rte_flow_attr *attr,
1555                                 const struct rte_flow_item pattern[],
1556                                 const struct rte_flow_action actions[],
1557                                 struct rte_flow_error *error)
1558 {
1559         struct rte_eth_ethertype_filter ethertype_filter;
1560         struct rte_eth_ntuple_filter ntuple_filter;
1561         struct hinic_fdir_rule fdir_rule;
1562         int ret;
1563
1564         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1565         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1566                                 actions, &ntuple_filter, error);
1567         if (!ret)
1568                 return 0;
1569
1570         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1571         ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1572                                 actions, &ethertype_filter, error);
1573
1574         if (!ret)
1575                 return 0;
1576
1577         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1578         ret = hinic_parse_fdir_filter(dev, attr, pattern,
1579                                 actions, &fdir_rule, error);
1580
1581         return ret;
1582 }
1583
1584 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1585                  struct hinic_5tuple_filter_info *hinic_filter_info)
1586 {
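        /*
         * Note: hinic_5tuple_filter_info uses inverted mask semantics
         * compared with rte_eth_ntuple_filter: mask == 0 means "match
         * the field exactly", mask == 1 means "ignore the field".
         */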
1587         switch (filter->dst_ip_mask) {
1588         case UINT32_MAX:
1589                 hinic_filter_info->dst_ip_mask = 0;
1590                 hinic_filter_info->dst_ip = filter->dst_ip;
1591                 break;
1592         case 0:
1593                 hinic_filter_info->dst_ip_mask = 1;
1594                 hinic_filter_info->dst_ip = 0;
1595                 break;
1596         default:
1597                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1598                 return -EINVAL;
1599         }
1600
1601         switch (filter->src_ip_mask) {
1602         case UINT32_MAX:
1603                 hinic_filter_info->src_ip_mask = 0;
1604                 hinic_filter_info->src_ip = filter->src_ip;
1605                 break;
1606         case 0:
1607                 hinic_filter_info->src_ip_mask = 1;
1608                 hinic_filter_info->src_ip = 0;
1609                 break;
1610         default:
1611                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1612                 return -EINVAL;
1613         }
1614         return 0;
1615 }
1616
1617 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1618                    struct hinic_5tuple_filter_info *hinic_filter_info)
1619 {
1620         switch (filter->dst_port_mask) {
1621         case UINT16_MAX:
1622                 hinic_filter_info->dst_port_mask = 0;
1623                 hinic_filter_info->dst_port = filter->dst_port;
1624                 break;
1625         case 0:
1626                 hinic_filter_info->dst_port_mask = 1;
1627                 hinic_filter_info->dst_port = 0;
1628                 break;
1629         default:
1630                 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1631                 return -EINVAL;
1632         }
1633
1634         switch (filter->src_port_mask) {
1635         case UINT16_MAX:
1636                 hinic_filter_info->src_port_mask = 0;
1637                 hinic_filter_info->src_port = filter->src_port;
1638                 break;
1639         case 0:
1640                 hinic_filter_info->src_port_mask = 1;
1641                 hinic_filter_info->src_port = 0;
1642                 break;
1643         default:
1644                 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1645                 return -EINVAL;
1646         }
1647
1648         return 0;
1649 }
1650
1651 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1652                     struct hinic_5tuple_filter_info *hinic_filter_info)
1653 {
1654         switch (filter->proto_mask) {
1655         case UINT8_MAX:
1656                 hinic_filter_info->proto_mask = 0;
1657                 hinic_filter_info->proto = filter->proto;
1658                 break;
1659         case 0:
1660                 hinic_filter_info->proto_mask = 1;
1661                 hinic_filter_info->proto = 0;
1662                 break;
1663         default:
1664                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1665                 return -EINVAL;
1666         }
1667
1668         return 0;
1669 }
1670
1671 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1672                         struct hinic_5tuple_filter_info *filter_info)
1673 {
1674         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1675                 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1676                 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1677                 return -EINVAL;
1678
1679         if (ntuple_ip_filter(filter, filter_info) ||
1680                 ntuple_port_filter(filter, filter_info) ||
1681                 ntuple_proto_filter(filter, filter_info))
1682                 return -EINVAL;
1683
1684         filter_info->priority = (uint8_t)filter->priority;
1685         return 0;
1686 }
1687
1688 static inline struct hinic_5tuple_filter *
1689 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1690                            struct hinic_5tuple_filter_info *key)
1691 {
1692         struct hinic_5tuple_filter *it;
1693
1694         TAILQ_FOREACH(it, filter_list, entries) {
1695                 if (memcmp(key, &it->filter_info,
1696                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1697                         return it;
1698                 }
1699         }
1700
1701         return NULL;
1702 }
1703
1704 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1705 {
1706         struct tag_pa_rule lacp_rule;
1707         struct tag_pa_action lacp_action;
1708
1709         memset(&lacp_rule, 0, sizeof(lacp_rule));
1710         memset(&lacp_action, 0, sizeof(lacp_action));
1711         /* LACP TCAM rule */
1712         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
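        /* 0x8809 is the IEEE 802.3 Slow Protocols ethertype carrying LACP */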
1713         lacp_rule.l2_header.eth_type.val16 = 0x8809;
1714         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1715
1716         /* LACP TCAM action */
1717         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1718         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1719         lacp_action.pkt_type = PKT_LACP_TYPE;
1720         lacp_action.pri = 0x0;
1721         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1722
1723         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1724                                         &lacp_rule, &lacp_action);
1725 }
1726
1727 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1728 {
1729         struct tag_pa_rule bgp_rule;
1730         struct tag_pa_action bgp_action;
1731
1732         memset(&bgp_rule, 0, sizeof(bgp_rule));
1733         memset(&bgp_action, 0, sizeof(bgp_action));
1734         /* BGP TCAM rule */
1735         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1736         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1737         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1738         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1739         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1740         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1741
1742         /* BGP TCAM action */
1743         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1744         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1745         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1746         bgp_action.pri = 0xf; /* BGP priority is 0xf, taken from the IPSU
1747                                * parse result; no need to convert
1748                                */
1749         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1750
1751         return hinic_set_fdir_tcam(nic_dev->hwdev,
1752                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1753 }
1754
1755 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1756 {
1757         struct tag_pa_rule bgp_rule;
1758         struct tag_pa_action bgp_action;
1759
1760         memset(&bgp_rule, 0, sizeof(bgp_rule));
1761         memset(&bgp_action, 0, sizeof(bgp_action));
1762         /* BGP TCAM rule */
1763         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1764         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1765         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1766         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1767         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1768         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1769
1770         /* BGP TCAM action */
1771         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1772         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1773         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1774         bgp_action.pri = 0xf; /* BGP priority is 0xf, taken from the IPSU
1775                                * parse result; no need to convert
1776                                */
1777         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1778
1779         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1780                                         &bgp_rule, &bgp_action);
1781 }
1782
1783 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1784 {
1785         struct tag_pa_rule vrrp_rule;
1786         struct tag_pa_action vrrp_action;
1787
1788         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1789         memset(&vrrp_action, 0, sizeof(vrrp_action));
1790         /* VRRP TCAM rule */
1791         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1792         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1793         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1794         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1795
1796         /* VRRP TCAM action */
1797         vrrp_action.err_type = 0x3f;
1798         vrrp_action.fwd_action = 0x7;
1799         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1800         vrrp_action.pri = 0xf;
1801         vrrp_action.push_len = 0xf;
1802
1803         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1804                                         &vrrp_rule, &vrrp_action);
1805 }
1806
1807 /**
1808  * Clear all fdir configuration.
1809  *
1810  * All packet-type filters and TCAM rules are removed with best effort;
1811  * errors from the individual clear operations are deliberately ignored,
1812  * so this function returns nothing.
1813  *
1814  * @param nic_dev
1815  *   The hardware interface of an Ethernet device.
1816  */
1817 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1818 {
1819         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1820
1821         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1822
1823         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1824
1825         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1826
1827         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1828
1829         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
1830 }
1831
1832 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1833                        struct hinic_filter_info *filter_info)
1834 {
1835         switch (filter->filter_info.proto) {
1836         case IPPROTO_TCP:
1837                 /* Filter type is bgp type if dst_port or src_port is 179 */
1838                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1839                         !(filter->filter_info.dst_port_mask)) {
1840                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1841                 } else if (filter->filter_info.src_port ==
1842                         RTE_BE16(BGP_DPORT_ID) &&
1843                         !(filter->filter_info.src_port_mask)) {
1844                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1845                 } else {
1846                         PMD_DRV_LOG(INFO, "TCP protocol: 5tuple filters"
1847                         " only support BGP now, proto: 0x%x, "
1848                         "dst_port: 0x%x, dst_port_mask: 0x%x, "
1849                         "src_port: 0x%x, src_port_mask: 0x%x.",
1850                         filter->filter_info.proto,
1851                         filter->filter_info.dst_port,
1852                         filter->filter_info.dst_port_mask,
1853                         filter->filter_info.src_port,
1854                         filter->filter_info.src_port_mask);
1855                         return -EINVAL;
1856                 }
1857                 break;
1858
1859         case IPPROTO_VRRP:
1860                 filter_info->pkt_type = PKT_VRRP_TYPE;
1861                 break;
1862
1863         case IPPROTO_ICMP:
1864                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1865                 break;
1866
1867         case IPPROTO_ICMPV6:
1868                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1869                 break;
1870
1871         default:
1872                 PMD_DRV_LOG(ERR, "5tuple filters only support BGP/VRRP/ICMP now, "
1873                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x, "
1874                 "src_port: 0x%x, src_port_mask: 0x%x.",
1875                 filter->filter_info.proto, filter->filter_info.dst_port,
1876                 filter->filter_info.dst_port_mask,
1877                 filter->filter_info.src_port,
1878                 filter->filter_info.src_port_mask);
1879                 return -EINVAL;
1880         }
1881
1882         return 0;
1883 }
1884
1885 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1886                         struct hinic_filter_info *filter_info, int *index)
1887 {
1888         int type_id;
1889
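        /*
         * Each hardware packet type maps to one bit of
         * filter_info->type_mask; at most HINIC_MAX_Q_FILTERS (64)
         * filter types can be enabled at a time.
         */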
1890         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1891
1892         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1893                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter types.");
1894                 return -EINVAL;
1895         }
1896
1897         if (!(filter_info->type_mask & (1 << type_id))) {
1898                 filter_info->type_mask |= 1 << type_id;
1899                 filter->index = type_id;
1900                 filter_info->pkt_filters[type_id].enable = true;
1901                 filter_info->pkt_filters[type_id].pkt_proto =
1902                                                 filter->filter_info.proto;
1903                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1904                                   filter, entries);
1905         } else {
1906                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1907                 return -EIO;
1908         }
1909
1910         *index = type_id;
1911         return 0;
1912 }
1913
1914 /*
1915  * Add a 5tuple filter
1916  *
1917  * @param dev:
1918  *  Pointer to struct rte_eth_dev.
1919  * @param filter:
1920  *  Pointer to the filter that will be added.
1921  * @return
1922  *    - On success, zero.
1923  *    - On failure, a negative value.
1924  */
1925 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
1926                                 struct hinic_5tuple_filter *filter)
1927 {
1928         struct hinic_filter_info *filter_info =
1929                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1930         int i, ret_fw;
1931         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1932
1933         if (hinic_filter_info_init(filter, filter_info) ||
1934                 hinic_lookup_new_filter(filter, filter_info, &i))
1935                 return -EFAULT;
1936
1937         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1938                                         filter_info->qid,
1939                                         filter_info->pkt_filters[i].enable,
1940                                         true);
1941         if (ret_fw) {
1942                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1943                         filter_info->pkt_type, filter->queue,
1944                         filter_info->pkt_filters[i].enable);
1945                 return -EFAULT;
1946         }
1947
1948         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1949                         filter_info->pkt_type, filter_info->qid,
1950                         filter_info->pkt_filters[filter->index].enable);
1951
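        /*
         * Besides the packet-type filter programmed above, BGP and VRRP
         * filters also require a matching TCAM rule in hardware.
         */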
1952         switch (filter->filter_info.proto) {
1953         case IPPROTO_TCP:
1954                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
1955                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
1956                         if (ret_fw) {
1957                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
1958                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1959                                         filter_info->pkt_type, filter->queue,
1960                                         filter_info->pkt_filters[i].enable);
1961                                 return -EFAULT;
1962                         }
1963
1964                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
1965                                 filter->queue,
1966                                 filter_info->pkt_filters[i].enable);
1967                 } else if (filter->filter_info.src_port ==
1968                         RTE_BE16(BGP_DPORT_ID)) {
1969                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
1970                         if (ret_fw) {
1971                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
1972                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1973                                         filter_info->pkt_type, filter->queue,
1974                                         filter_info->pkt_filters[i].enable);
1975                                 return -EFAULT;
1976                         }
1977
1978                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
1979                                         filter->queue,
1980                                         filter_info->pkt_filters[i].enable);
1981                 }
1982
1983                 break;
1984
1985         case IPPROTO_VRRP:
1986                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
1987                 if (ret_fw) {
1988                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
1989                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1990                                 filter_info->pkt_type, filter->queue,
1991                                 filter_info->pkt_filters[i].enable);
1992                         return -EFAULT;
1993                 }
1994                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
1995                                 filter->queue,
1996                                 filter_info->pkt_filters[i].enable);
1997                 break;
1998
1999         default:
2000                 break;
2001         }
2002
2003         return 0;
2004 }
2005
2006 /*
2007  * Remove a 5tuple filter
2008  *
2009  * @param dev
2010  *  Pointer to struct rte_eth_dev.
2011  * @param filter
2012  *  Pointer to the filter that will be removed.
2013  */
2014 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2015                            struct hinic_5tuple_filter *filter)
2016 {
2017         struct hinic_filter_info *filter_info =
2018                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2019         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2020
2021         switch (filter->filter_info.proto) {
2022         case IPPROTO_VRRP:
2023                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2024                 break;
2025
2026         case IPPROTO_TCP:
2027                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2028                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2029                                                         TCAM_PKT_BGP_DPORT);
2030                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2031                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2032                                                         TCAM_PKT_BGP_SPORT);
2033                 break;
2034
2035         default:
2036                 break;
2037         }
2038
2039         hinic_filter_info_init(filter, filter_info);
2040
2041         filter_info->pkt_filters[filter->index].enable = false;
2042         filter_info->pkt_filters[filter->index].pkt_proto = 0;
2043
2044         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2045                 filter_info->pkt_type,
2046                 filter_info->pkt_filters[filter->index].qid,
2047                 filter_info->pkt_filters[filter->index].enable);
2048         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2049                                 filter_info->pkt_filters[filter->index].qid,
2050                                 filter_info->pkt_filters[filter->index].enable,
2051                                 true);
2052
2053         filter_info->pkt_type = 0;
2054         filter_info->qid = 0;
2055         filter_info->pkt_filters[filter->index].qid = 0;
2056         filter_info->type_mask &= ~(1 << (filter->index));
2057         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2058
2059         rte_free(filter);
2060 }
2061
2062 /*
2063  * Add or delete a ntuple filter
2064  *
2065  * @param dev
2066  *  Pointer to struct rte_eth_dev.
2067  * @param ntuple_filter
2068  *  Pointer to struct rte_eth_ntuple_filter
2069  * @param add
2070  *  If true, add filter; if false, remove filter
2071  * @return
2072  *    - On success, zero.
2073  *    - On failure, a negative value.
2074  */
2075 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2076                                 struct rte_eth_ntuple_filter *ntuple_filter,
2077                                 bool add)
2078 {
2079         struct hinic_filter_info *filter_info =
2080                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2081         struct hinic_5tuple_filter_info filter_5tuple;
2082         struct hinic_5tuple_filter *filter;
2083         int ret;
2084
2085         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2086                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2087                 return -EINVAL;
2088         }
2089
2090         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2091         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2092         if (ret < 0)
2093                 return ret;
2094
2095         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2096                                          &filter_5tuple);
2097         if (filter != NULL && add) {
2098                 PMD_DRV_LOG(ERR, "Filter exists.");
2099                 return -EEXIST;
2100         }
2101         if (filter == NULL && !add) {
2102                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2103                 return -ENOENT;
2104         }
2105
2106         if (add) {
2107                 filter = rte_zmalloc("hinic_5tuple_filter",
2108                                 sizeof(struct hinic_5tuple_filter), 0);
2109                 if (filter == NULL)
2110                         return -ENOMEM;
2111                 rte_memcpy(&filter->filter_info, &filter_5tuple,
2112                                 sizeof(struct hinic_5tuple_filter_info));
2113                 filter->queue = ntuple_filter->queue;
2114
2115                 filter_info->qid = ntuple_filter->queue;
2116
2117                 ret = hinic_add_5tuple_filter(dev, filter);
2118                 if (ret)
2119                         rte_free(filter);
2120
2121                 return ret;
2122         }
2123
2124         hinic_remove_5tuple_filter(dev, filter);
2125
2126         return 0;
2127 }
2128
2129 static inline int
2130 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2131 {
2132         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2133                 return -EINVAL;
2134
2135         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2136                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2137                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2138                         " ethertype filter", filter->ether_type);
2139                 return -EINVAL;
2140         }
2141
2142         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2143                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2144                 return -EINVAL;
2145         }
2146         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2147                 PMD_DRV_LOG(ERR, "Drop option is not supported");
2148                 return -EINVAL;
2149         }
2150
2151         return 0;
2152 }
2153
2154 static inline int
2155 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2156                               struct hinic_pkt_filter *ethertype_filter)
2157 {
2158         switch (ethertype_filter->pkt_proto) {
2159         case RTE_ETHER_TYPE_SLOW:
2160                 filter_info->pkt_type = PKT_LACP_TYPE;
2161                 break;
2162
2163         case RTE_ETHER_TYPE_ARP:
2164                 filter_info->pkt_type = PKT_ARP_TYPE;
2165                 break;
2166
2167         default:
2168                 PMD_DRV_LOG(ERR, "Only LACP/ARP are supported for ethertype filters");
2169                 return -EIO;
2170         }
2171
2172         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2173 }
2174
2175 static inline int
2176 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2177                               struct hinic_pkt_filter *ethertype_filter)
2178 {
2179         int id;
2180
2181         /* Find LACP or ARP type id */
2182         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2183         if (id < 0)
2184                 return -EINVAL;
2185
2186         if (!(filter_info->type_mask & (1 << id))) {
2187                 filter_info->type_mask |= 1 << id;
2188                 filter_info->pkt_filters[id].pkt_proto =
2189                         ethertype_filter->pkt_proto;
2190                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2191                 filter_info->qid = ethertype_filter->qid;
2192                 return id;
2193         }
2194
2195         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2196         return -EINVAL;
2197 }
2198
2199 static inline void
2200 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2201                               uint8_t idx)
2202 {
2203         if (idx >= HINIC_MAX_Q_FILTERS)
2204                 return;
2205
2206         filter_info->pkt_type = 0;
2207         filter_info->type_mask &= ~(1 << idx);
2208         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2209         filter_info->pkt_filters[idx].enable = FALSE;
2210         filter_info->pkt_filters[idx].qid = 0;
2211 }
2212
2213 static inline int
2214 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2215                                struct rte_eth_ethertype_filter *filter,
2216                                bool add)
2217 {
2218         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2219         struct hinic_filter_info *filter_info =
2220                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2221         struct hinic_pkt_filter ethertype_filter;
2222         int i;
2223         int ret_fw;
2224
2225         if (hinic_check_ethertype_filter(filter))
2226                 return -EINVAL;
2227
2228         if (add) {
2229                 ethertype_filter.pkt_proto = filter->ether_type;
2230                 ethertype_filter.enable = TRUE;
2231                 ethertype_filter.qid = (u8)filter->queue;
2232                 i = hinic_ethertype_filter_insert(filter_info,
2233                                                     &ethertype_filter);
2234                 if (i < 0)
2235                         return -ENOSPC;
2236
2237                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2238                                 filter_info->pkt_type, filter_info->qid,
2239                                 filter_info->pkt_filters[i].enable, true);
2240                 if (ret_fw) {
2241                         PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2242                                 filter_info->pkt_type, filter->queue,
2243                                 filter_info->pkt_filters[i].enable);
2244
2245                         hinic_ethertype_filter_remove(filter_info, i);
2246                         return -ENOENT;
2247                 }
2248                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2249                                 filter_info->pkt_type, filter->queue,
2250                                 filter_info->pkt_filters[i].enable);
2251
2252                 switch (ethertype_filter.pkt_proto) {
2253                 case RTE_ETHER_TYPE_SLOW:
2254                         ret_fw = hinic_set_lacp_tcam(nic_dev);
2255                         if (ret_fw) {
2256                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2257                                 hinic_ethertype_filter_remove(filter_info, i);
2258                                 return -ENOENT;
2259                         }
2260
2261                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2262                         break;
2263                 default:
2264                         break;
2265                 }
2266         } else {
2267                 ethertype_filter.pkt_proto = filter->ether_type;
2268                 i = hinic_ethertype_filter_lookup(filter_info,
2269                                                 &ethertype_filter);
2270
2271                 if ((filter_info->type_mask & (1 << i))) {
2272                         filter_info->pkt_filters[i].enable = FALSE;
2273                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
2274                                         filter_info->pkt_type,
2275                                         filter_info->pkt_filters[i].qid,
2276                                         filter_info->pkt_filters[i].enable,
2277                                         true);
2278
2279                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2280                                         filter_info->pkt_type,
2281                                         filter_info->pkt_filters[i].qid,
2282                                         filter_info->pkt_filters[i].enable);
2283
2284                         switch (ethertype_filter.pkt_proto) {
2285                         case RTE_ETHER_TYPE_SLOW:
2286                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2287                                                                 TCAM_PKT_LACP);
2288                                 PMD_DRV_LOG(INFO,
2289                                         "Del lacp tcam succeed");
2290                                 break;
2291                         default:
2292                                 break;
2293                         }
2294
2295                         hinic_ethertype_filter_remove(filter_info, i);
2296
2297                 } else {
2298                         PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2299                                         filter_info->pkt_type, filter->queue,
2300                                         filter_info->pkt_filters[i].enable);
2301                         return -ENOENT;
2302                 }
2303         }
2304
2305         return 0;
2306 }
2307
2308 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2309                                 struct hinic_fdir_info *fdir_info)
2310 {
2311         switch (rule->mask.src_ipv4_mask) {
2312         case UINT32_MAX:
2313                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2314                 fdir_info->qid = rule->queue;
2315                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2316                 return 0;
2317
2318         case 0:
2319                 break;
2320
2321         default:
2322                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2323                 return -EINVAL;
2324         }
2325
2326         switch (rule->mask.dst_ipv4_mask) {
2327         case UINT32_MAX:
2328                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2329                 fdir_info->qid = rule->queue;
2330                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2331                 return 0;
2332
2333         case 0:
2334                 break;
2335
2336         default:
2337                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2338                 return -EINVAL;
2339         }
2340
2341         if (fdir_info->fdir_flag == 0) {
2342                 PMD_DRV_LOG(ERR, "All supported masks are NULL.");
2343                 return -EINVAL;
2344         }
2345
2346         return 0;
2347 }
2348
2349 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2350                                         struct hinic_fdir_rule *rule, bool add)
2351 {
2352         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2353         struct hinic_fdir_info fdir_info;
2354         int ret;
2355
2356         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2357
2358         ret = hinic_fdir_info_init(rule, &fdir_info);
2359         if (ret) {
2360                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2361                 return ret;
2362         }
2363
2364         if (add) {
2365                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2366                                                 true, fdir_info.fdir_key,
2367                                                 true, fdir_info.fdir_flag);
2368                 if (ret) {
2369                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2370                                         fdir_info.fdir_flag, fdir_info.qid,
2371                                         fdir_info.fdir_key);
2372                         return -ENOENT;
2373                 }
2374                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2375                                 fdir_info.fdir_flag, fdir_info.qid,
2376                                 fdir_info.fdir_key);
2377         } else {
2378                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2379                                                 false, fdir_info.fdir_key, true,
2380                                                 fdir_info.fdir_flag);
2381                 if (ret) {
2382                         PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2383                                 fdir_info.fdir_flag, fdir_info.qid,
2384                                 fdir_info.fdir_key);
2385                         return -ENOENT;
2386                 }
2387                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2388                                 fdir_info.fdir_flag, fdir_info.qid,
2389                                 fdir_info.fdir_key);
2390         }
2391
2392         return 0;
2393 }
2394
2395 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2396 {
2397         u8 idx;
2398
2399         for (idx = 0; idx < len; idx++)
2400                 key_y[idx] = src_input[idx] & mask[idx];
2401 }
2402
2403 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2404 {
2405         u8 idx;
2406
2407         for (idx = 0; idx < len; idx++)
2408                 key_x[idx] = key_y[idx] ^ mask[idx];
2409 }
2410
2411 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2412                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2413 {
2414         tcam_translate_key_y(fdir_tcam_rule->key.y,
2415                 (u8 *)(&tcam_key->key_info),
2416                 (u8 *)(&tcam_key->key_mask),
2417                 TCAM_FLOW_KEY_SIZE);
2418         tcam_translate_key_x(fdir_tcam_rule->key.x,
2419                 fdir_tcam_rule->key.y,
2420                 (u8 *)(&tcam_key->key_mask),
2421                 TCAM_FLOW_KEY_SIZE);
2422 }
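
/*
 * Per-bit key encoding produced by the two translations above (the
 * standard TCAM x/y scheme): y = value & mask and x = y ^ mask, so
 *
 *   mask = 0          -> (x, y) = (0, 0)   bit is "don't care"
 *   mask = 1, val = 0 -> (x, y) = (1, 0)   bit must be 0
 *   mask = 1, val = 1 -> (x, y) = (0, 1)   bit must be 1
 */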
2423
2424 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2425                                 struct hinic_fdir_rule *rule,
2426                                 struct tag_tcam_key *tcam_key,
2427                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2428 {
2429         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2430
2431         switch (rule->mask.dst_ipv4_mask) {
2432         case UINT32_MAX:
2433                 tcam_key->key_info.ext_dip_h =
2434                         (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2435                 tcam_key->key_info.ext_dip_l =
2436                         rule->hinic_fdir.dst_ip & 0xffffU;
2437                 tcam_key->key_mask.ext_dip_h =
2438                         (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2439                 tcam_key->key_mask.ext_dip_l =
2440                         rule->mask.dst_ipv4_mask & 0xffffU;
2441                 break;
2442
2443         case 0:
2444                 break;
2445
2446         default:
2447                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2448                 return -EINVAL;
2449         }
2450
2451         if (rule->mask.dst_port_mask > 0) {
2452                 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2453                 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2454         }
2455
2456         if (rule->mask.src_port_mask > 0) {
2457                 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2458                 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2459         }
2460
2461         switch (rule->mask.tunnel_flag) {
2462         case UINT16_MAX:
2463                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2464                 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2465                 break;
2466
2467         case 0:
2468                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2469                 tcam_key->key_mask.tunnel_flag = 0;
2470                 break;
2471
2472         default:
2473                 PMD_DRV_LOG(ERR, "Invalid tunnel flag mask.");
2474                 return -EINVAL;
2475         }
2476
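        /*
         * For tunnel packets the inner L4 ports below are written into the
         * same dst_port/src_port key fields and override any outer ports
         * set above.
         */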
2477         if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2478                 tcam_key->key_info.dst_port =
2479                                         rule->hinic_fdir.tunnel_inner_dst_port;
2480                 tcam_key->key_mask.dst_port =
2481                                         rule->mask.tunnel_inner_dst_port_mask;
2482         }
2483
2484         if (rule->mask.tunnel_inner_src_port_mask > 0) {
2485                 tcam_key->key_info.src_port =
2486                                         rule->hinic_fdir.tunnel_inner_src_port;
2487                 tcam_key->key_mask.src_port =
2488                                         rule->mask.tunnel_inner_src_port_mask;
2489         }
2490
2491         switch (rule->mask.proto_mask) {
2492         case UINT16_MAX:
2493                 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2494                 tcam_key->key_mask.protocol = UINT8_MAX;
2495                 break;
2496
2497         case 0:
2498                 break;
2499
2500         default:
2501                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
2502                 return -EINVAL;
2503         }
2504
2505         tcam_key->key_mask.function_id = UINT16_MAX;
2506
2507         tcam_key->key_info.function_id = hinic_global_func_id(nic_dev->hwdev);
2508
2509         fdir_tcam_rule->data.qid = rule->queue;
2510
2511         tcam_key_calculate(tcam_key, fdir_tcam_rule);
2512
2513         return 0;
2514 }
2515
2516 static inline struct hinic_tcam_filter *
2517 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2518                         struct tag_tcam_key *key)
2519 {
2520         struct hinic_tcam_filter *it;
2521
2522         TAILQ_FOREACH(it, filter_list, entries) {
2523                 if (memcmp(key, &it->tcam_key,
2524                         sizeof(struct tag_tcam_key)) == 0) {
2525                         return it;
2526                 }
2527         }
2528
2529         return NULL;
2530 }
2531
2532 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2533                                         struct hinic_tcam_info *tcam_info,
2534                                         struct hinic_tcam_filter *tcam_filter,
2535                                         u16 *tcam_index)
2536 {
2537         int index;
2538         int max_index;
2539         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2540
2541         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2542                 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2543         else
2544                 max_index = HINIC_PF_MAX_TCAM_FILTERS;
2545
2546         for (index = 0; index < max_index; index++) {
2547                 if (tcam_info->tcam_index_array[index] == 0)
2548                         break;
2549         }
2550
2551         if (index == max_index) {
2552                 PMD_DRV_LOG(ERR, "Function 0x%x tcam filters only support %d filter rules",
2553                         hinic_global_func_id(nic_dev->hwdev), max_index);
2554                 return -EINVAL;
2555         }
2556
2557         tcam_filter->index = index;
2558         *tcam_index = index;
2559
2560         return 0;
2561 }
2562
static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
                                struct hinic_tcam_filter *tcam_filter,
                                struct tag_tcam_cfg_rule *fdir_tcam_rule)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        u16 index = 0;
        u16 tcam_block_index = 0;
        int rc;

        if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
                return -EINVAL;

        if (tcam_info->tcam_rule_nums == 0) {
                if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                        rc = hinic_alloc_tcam_block(nic_dev->hwdev,
                                HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
                        if (rc != 0) {
                                PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
                                return -EFAULT;
                        }
                } else {
                        rc = hinic_alloc_tcam_block(nic_dev->hwdev,
                                HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
                        if (rc != 0) {
                                PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
                                return -EFAULT;
                        }
                }

                tcam_info->tcam_block_index = tcam_block_index;
        } else {
                tcam_block_index = tcam_info->tcam_block_index;
        }

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                fdir_tcam_rule->index =
                        HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
        } else {
                fdir_tcam_rule->index =
                        tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
        }

        rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
        if (rc != 0) {
                PMD_DRV_LOG(ERR, "Add fdir tcam rule failed!");
                return -EFAULT;
        }

        PMD_DRV_LOG(INFO, "Add fdir tcam rule, function_id: 0x%x, "
                "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeeded",
                hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
                fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
                tcam_info->tcam_rule_nums + 1);

        if (tcam_info->tcam_rule_nums == 0) {
                rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
                if (rc < 0) {
                        (void)hinic_del_tcam_rule(nic_dev->hwdev,
                                                fdir_tcam_rule->index);
                        return rc;
                }
        }

        TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);

        tcam_info->tcam_index_array[index] = 1;
        tcam_info->tcam_rule_nums++;

        return 0;
}

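/*
 * Remove one TCAM rule from hardware and from the software list,
 * releasing its index slot; the TCAM block is freed when the last rule
 * of this function is deleted.
 */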
static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
                                struct hinic_tcam_filter *tcam_filter)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        u32 index = 0;
        u16 tcam_block_index = tcam_info->tcam_block_index;
        int rc;
        u8 block_type = 0;

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
                        tcam_filter->index;
                block_type = HINIC_TCAM_BLOCK_TYPE_VF;
        } else {
                index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
                        tcam_filter->index;
                block_type = HINIC_TCAM_BLOCK_TYPE_PF;
        }

        rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
        if (rc != 0) {
                PMD_DRV_LOG(ERR, "Del fdir tcam rule failed!");
                return -EFAULT;
        }

        PMD_DRV_LOG(INFO, "Del fdir tcam rule, function_id: 0x%x, "
                "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeeded",
                hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
                tcam_info->tcam_rule_nums - 1);

        TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);

        tcam_info->tcam_index_array[tcam_filter->index] = 0;

        rte_free(tcam_filter);

        tcam_info->tcam_rule_nums--;

        if (tcam_info->tcam_rule_nums == 0) {
                (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
                                        &tcam_block_index);
        }

        return 0;
}

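/*
 * Convert an fdir rule to a TCAM key/rule pair, then either install it
 * (add == true) or look it up by key and remove it (add == false).
 */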
static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
                                        struct hinic_fdir_rule *rule, bool add)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_tcam_filter *tcam_filter;
        struct tag_tcam_cfg_rule fdir_tcam_rule;
        struct tag_tcam_key tcam_key;
        int ret;

        memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
        memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));

        ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
        if (ret) {
                PMD_DRV_LOG(ERR, "Init fdir tcam info failed!");
                return ret;
        }

        tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
                                                &tcam_key);
        if (tcam_filter != NULL && add) {
                PMD_DRV_LOG(ERR, "Filter exists.");
                return -EEXIST;
        }
        if (tcam_filter == NULL && !add) {
                PMD_DRV_LOG(ERR, "Filter doesn't exist.");
                return -ENOENT;
        }

        if (add) {
                tcam_filter = rte_zmalloc("hinic_tcam_filter",
                                sizeof(struct hinic_tcam_filter), 0);
                if (tcam_filter == NULL)
                        return -ENOMEM;
                (void)rte_memcpy(&tcam_filter->tcam_key,
                                 &tcam_key, sizeof(struct tag_tcam_key));
                tcam_filter->queue = fdir_tcam_rule.data.qid;

                ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
                if (ret < 0) {
                        rte_free(tcam_filter);
                        return ret;
                }

                rule->tcam_index = fdir_tcam_rule.index;
        } else {
                PMD_DRV_LOG(INFO, "Begin to delete tcam filter");
                ret = hinic_del_tcam_filter(dev, tcam_filter);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error)
{
        int ret;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct hinic_fdir_rule fdir_rule;
        struct rte_flow *flow = NULL;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
                return NULL;
        }

        hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
                        sizeof(struct hinic_flow_mem), 0);
        if (!hinic_flow_mem_ptr) {
                PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
                rte_free(flow);
                return NULL;
        }

        hinic_flow_mem_ptr->flow = flow;
        TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
                                entries);

        /* Add ntuple filter */
        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = hinic_parse_ntuple_filter(dev, attr, pattern,
                        actions, &ntuple_filter, error);
        if (!ret) {
                ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
                if (!ret) {
                        ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
                                sizeof(struct hinic_ntuple_filter_ele), 0);
                        if (ntuple_filter_ptr == NULL) {
                                (void)hinic_add_del_ntuple_filter(dev,
                                                &ntuple_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ntuple_filter_ptr->filter_info,
                                   &ntuple_filter,
                                   sizeof(struct rte_eth_ntuple_filter));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
                                        ntuple_filter_ptr, entries);
                        flow->rule = ntuple_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_NTUPLE;

                        PMD_DRV_LOG(INFO, "Create flow ntuple succeeded, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

        /* Add ethertype filter */
        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
                                        &ethertype_filter, error);
        if (!ret) {
                ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
                                                     TRUE);
                if (!ret) {
                        ethertype_filter_ptr =
                                rte_zmalloc("hinic_ethertype_filter",
                                sizeof(struct hinic_ethertype_filter_ele), 0);
                        if (ethertype_filter_ptr == NULL) {
                                (void)hinic_add_del_ethertype_filter(dev,
                                                &ethertype_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ethertype_filter_ptr->filter_info,
                                &ethertype_filter,
                                sizeof(struct rte_eth_ethertype_filter));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        flow->rule = ethertype_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;

                        PMD_DRV_LOG(INFO, "Create flow ethertype succeeded, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

        /* Add fdir filter */
        memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
        ret = hinic_parse_fdir_filter(dev, attr, pattern,
                                      actions, &fdir_rule, error);
        if (!ret) {
                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
                        ret = hinic_add_del_fdir_filter(dev,
                                        &fdir_rule, TRUE);
                } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
                        ret = hinic_add_del_tcam_fdir_filter(dev,
                                        &fdir_rule, TRUE);
                } else {
                        PMD_DRV_LOG(ERR, "Create flow fdir rule failed, rule mode is wrong");
                        ret = -EINVAL;
                        goto out;
                }
                if (!ret) {
                        fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
                                sizeof(struct hinic_fdir_rule_ele), 0);
                        if (fdir_rule_ptr == NULL) {
                                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL)
                                        (void)hinic_add_del_fdir_filter(dev,
                                                        &fdir_rule, FALSE);
                                else
                                        (void)hinic_add_del_tcam_fdir_filter(dev,
                                                        &fdir_rule, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
                                sizeof(struct hinic_fdir_rule));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
                                fdir_rule_ptr, entries);
                        flow->rule = fdir_rule_ptr;
                        flow->filter_type = RTE_ETH_FILTER_FDIR;

                        PMD_DRV_LOG(INFO, "Create flow fdir rule succeeded, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

out:
        TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(hinic_flow_mem_ptr);
        rte_free(flow);
        return NULL;
}
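
/*
 * Usage sketch (illustrative only, excluded from the build): an
 * application reaches hinic_flow_create() through the generic rte_flow
 * API. The helper below builds an ingress ETH/IPV4/TCP pattern with a
 * QUEUE action; the port id, destination address and queue index are
 * assumptions chosen for the example.
 */
#if 0
static struct rte_flow *
example_create_tcp_flow(uint16_t port_id)
{
        struct rte_flow_error error;
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Steer matching packets to Rx queue 1 (example value) */
        struct rte_flow_action_queue queue = { .index = 1 };
        /* Match destination IPv4 address 192.168.0.1 exactly */
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.dst_addr = RTE_BE32(0xC0A80001),
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Both calls are dispatched to hinic_flow_ops by the ethdev layer */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
                return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &error);
}
#endif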

/* Destroy a flow rule on hinic. */
static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                                struct rte_flow_error *error)
{
        int ret;
        struct rte_flow *pmd_flow = flow;
        enum rte_filter_type filter_type = pmd_flow->filter_type;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct hinic_fdir_rule fdir_rule;
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
                ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
                                        pmd_flow->rule;
                rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
                        sizeof(struct rte_eth_ntuple_filter));
                ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
                                ntuple_filter_ptr, entries);
                        rte_free(ntuple_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_ETHERTYPE:
                ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
                                        pmd_flow->rule;
                rte_memcpy(&ethertype_filter,
                        &ethertype_filter_ptr->filter_info,
                        sizeof(struct rte_eth_ethertype_filter));
                ret = hinic_add_del_ethertype_filter(dev,
                                &ethertype_filter, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        rte_free(ethertype_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_FDIR:
                fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
                rte_memcpy(&fdir_rule,
                        &fdir_rule_ptr->filter_info,
                        sizeof(struct hinic_fdir_rule));
                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
                        ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
                } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
                        ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
                                                                FALSE);
                } else {
                        PMD_DRV_LOG(ERR, "FDIR filter type is wrong!");
                        ret = -EINVAL;
                }
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
                                fdir_rule_ptr, entries);
                        rte_free(fdir_rule_ptr);
                }
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                        filter_type);
                ret = -EINVAL;
                break;
        }

        if (ret) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failed to destroy flow");
                return ret;
        }

        TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
                if (hinic_flow_mem_ptr->flow == pmd_flow) {
                        TAILQ_REMOVE(&nic_dev->hinic_flow_list,
                                hinic_flow_mem_ptr, entries);
                        rte_free(hinic_flow_mem_ptr);
                        break;
                }
        }
        rte_free(flow);

        PMD_DRV_LOG(INFO, "Destroy flow succeeded, func_id: 0x%x",
                        hinic_global_func_id(nic_dev->hwdev));

        return ret;
}

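/*
 * Counterpart teardown sketch (illustrative only, excluded from the
 * build): destroying one rule returned by rte_flow_create() and then
 * flushing whatever is left on the port. port_id is an assumption for
 * the example.
 */
#if 0
static void
example_teardown(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_error error;

        /* Removes the hardware rule and frees the PMD bookkeeping */
        if (rte_flow_destroy(port_id, flow, &error) != 0)
                PMD_DRV_LOG(ERR, "destroy failed: %s",
                        error.message ? error.message : "unknown");

        /* Drops every remaining rule on the port in one call */
        (void)rte_flow_flush(port_id, &error);
}
#endif
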
/* Remove all the n-tuple filters */
static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
        struct hinic_filter_info *filter_info =
                HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct hinic_5tuple_filter *p_5tuple;

        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
                hinic_remove_5tuple_filter(dev, p_5tuple);
}

/* Remove all the ether type filters */
static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct hinic_filter_info *filter_info =
                HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
        int ret = 0;

        if (filter_info->type_mask &
                (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
                hinic_ethertype_filter_remove(filter_info,
                        HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
                ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
                                        filter_info->qid, false, true);
                if (ret)
                        PMD_DRV_LOG(ERR, "Clear LACP ethertype filter failed");

                (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
        }

        if (filter_info->type_mask &
                (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
                hinic_ethertype_filter_remove(filter_info,
                        HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
                ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
                        filter_info->qid, false, true);
                if (ret)
                        PMD_DRV_LOG(ERR, "Clear ARP ethertype filter failed");
        }
}

/* Remove all the fdir filters */
static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_tcam_filter *tcam_filter_ptr;

        while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
                (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);

        (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);

        (void)hinic_flush_tcam_rule(nic_dev->hwdev);
}

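/* Free all software filter bookkeeping attached to the port. */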
static void hinic_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        while ((ntuple_filter_ptr =
                        TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
                TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
                                entries);
                rte_free(ntuple_filter_ptr);
        }

        while ((ethertype_filter_ptr =
                        TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
                TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                rte_free(ethertype_filter_ptr);
        }

        while ((fdir_rule_ptr =
                        TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
                TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
                                entries);
                rte_free(fdir_rule_ptr);
        }

        while ((hinic_flow_mem_ptr =
                        TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
                TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
                                entries);
                rte_free(hinic_flow_mem_ptr->flow);
                rte_free(hinic_flow_mem_ptr);
        }
}

/* Destroy all flow rules associated with a port on hinic. */
static int hinic_flow_flush(struct rte_eth_dev *dev,
                                __rte_unused struct rte_flow_error *error)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        hinic_clear_all_ntuple_filter(dev);
        hinic_clear_all_ethertype_filter(dev);
        hinic_clear_all_fdir_filter(dev);
        hinic_filterlist_flush(dev);

        PMD_DRV_LOG(INFO, "Flush flow succeeded, func_id: 0x%x",
                        hinic_global_func_id(nic_dev->hwdev));
        return 0;
}

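/*
 * Tear down every hardware filter and free the software filter lists;
 * unlike hinic_flow_flush() this variant takes no rte_flow_error and
 * is intended for driver-internal cleanup paths.
 */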
void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
{
        hinic_clear_all_ntuple_filter(dev);
        hinic_clear_all_ethertype_filter(dev);
        hinic_clear_all_fdir_filter(dev);
        hinic_filterlist_flush(dev);
}

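/*
 * Generic flow API callbacks exported to the ethdev layer; applications
 * invoke them indirectly via rte_flow_validate()/rte_flow_create()/
 * rte_flow_destroy()/rte_flow_flush().
 */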
const struct rte_flow_ops hinic_flow_ops = {
        .validate = hinic_flow_validate,
        .create = hinic_flow_create,
        .destroy = hinic_flow_destroy,
        .flush = hinic_flow_flush,
};
