/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6
#define IP_HEADER_PROTOCOL_TYPE_UDP     17
#define IP_HEADER_PROTOCOL_TYPE_ICMP    1

#define FDIR_TCAM_NORMAL_PACKET         0
#define FDIR_TCAM_TUNNEL_PACKET         1

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->tcam)

enum hinic_atr_flow_type {
        HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
        HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
        HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
        HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
};

/* Structure to store fdir's info. */
struct hinic_fdir_info {
        uint8_t fdir_flag;
        uint8_t qid;
        uint32_t fdir_key;
};

/**
 * An endless loop cannot happen given the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}
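
/*
 * Usage sketch (illustrative comment, not driver code): both helpers above
 * walk a caller-supplied array while skipping VOID entries. For a
 * hypothetical pattern { VOID, ETH, VOID, IPV4, END }, repeated calls visit
 * ETH, then IPV4, then END:
 *
 *      const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *      while (it->type != RTE_FLOW_ITEM_TYPE_END) {
 *              ... handle it->type (ETH on the first pass, IPV4 next) ...
 *              it = next_no_void_pattern(pattern, it);
 *      }
 *
 * Passing NULL as cur starts the walk at pattern[0]. Per the assumptions
 * documented above, the caller must guarantee a terminating END entry,
 * otherwise the helper runs past the end of the array.
 */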

static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
                                const struct rte_flow_item *pattern,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
                                        struct rte_flow_error *error)
{
        /* The first non-void item should be MAC */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }
        return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
                        const struct rte_flow_action *act,
                        const struct rte_flow_action_queue *act_q,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        /* Parse action */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
                act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to check whether it is an ethertype rule, and fill in the
 * ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
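/*
 * Illustrative sketch (comment only; the structures are the standard
 * rte_flow API, and the ethertype value is a hypothetical choice): an
 * application would express the rule documented above roughly as below.
 * 0x8809 is the slow-protocols (LACP) ethertype, one of the two values
 * this driver accepts (see hinic_parse_ethertype_filter below).
 *
 *      struct rte_flow_item_eth spec = { .type = RTE_BE16(0x8809) };
 *      struct rte_flow_item_eth mask = { .type = RTE_BE16(0xFFFF) };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &spec, .mask = &mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *
 * All other spec/mask members stay zeroed and item->last stays NULL, as
 * required by the parser below.
 */
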
static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item *pattern,
                        const struct rte_flow_action *actions,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act = NULL;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        item = next_no_void_pattern(pattern, NULL);
        if (hinic_check_ethertype_first_item(item, error))
                return -rte_errno;

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /*
         * Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /*
         * If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
                return -rte_errno;

        if (hinic_check_ethertype_attr_ele(attr, error))
                return -rte_errno;

        return 0;
}

static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
                return -rte_errno;

        /* NIC doesn't support MAC address matching. */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index much too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Drop option is unsupported");
                return -rte_errno;
        }

        /* Hinic only supports LACP/ARP ether types */
        if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
                filter->ether_type != RTE_ETHER_TYPE_ARP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "only lacp/arp type supported by ethertype filter");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* If the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (item->type != RTE_FLOW_ITEM_TYPE_END &&
                (!item->spec || !item->mask)) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* All ICMP fields should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to check whether it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big-endian (network) order, while rte_flow_attr and
 * rte_flow_action use CPU order. This is because the pattern describes
 * packets, and packets normally use network order.
 */
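/*
 * Illustrative sketch (comment only): how an application might build the
 * example pattern above with the standard rte_flow API. The addresses and
 * ports are the hypothetical values from the table; note that every
 * spec/mask field is supplied in big-endian form, per the assumption
 * stated above (RTE_IPV4 builds a host-order address, hence the RTE_BE32
 * conversion).
 *
 *      struct rte_flow_item_ipv4 ip_spec = {
 *              .hdr = {
 *                      .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *                      .dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
 *                      .next_proto_id = 17,
 *              },
 *      };
 *      struct rte_flow_item_ipv4 ip_mask = {
 *              .hdr = {
 *                      .src_addr = RTE_BE32(UINT32_MAX),
 *                      .dst_addr = RTE_BE32(UINT32_MAX),
 *                      .next_proto_id = 0xFF,
 *              },
 *      };
 *      struct rte_flow_item_udp udp_spec = {
 *              .hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
 *      };
 *      struct rte_flow_item_udp udp_mask = {
 *              .hdr = { .src_port = RTE_BE16(0xFFFF),
 *                       .dst_port = RTE_BE16(0xFFFF) },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */
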
static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}

static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support TCP flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic doesn't support many priorities */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 or TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter, support mac, ipv4, tcp, udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support mac, ipv4");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src, dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
                rule->mode = HINIC_FDIR_MODE_NORMAL;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP or UDP or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
                    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, end");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                        __rte_unused const struct rte_flow_item pattern[],
                        __rte_unused struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by normal fdir filter, not support l4");
                return -rte_errno;
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int
hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item = *in_out_item;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;

        if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
                rule->mode = HINIC_FDIR_MODE_TCAM;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir normal tcam filter");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
                rule->mask.src_port_mask = tcp_mask->hdr.src_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
                if (item->spec) {
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
                        rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                /*
                 * Only care about src & dst ports,
                 * others should be masked.
                 */
                if (!item->mask) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support src, dst ports");
                        return -rte_errno;
                }

                udp_mask = (const struct rte_flow_item_udp *)item->mask;
                if (udp_mask->hdr.dgram_len ||
                        udp_mask->hdr.dgram_cksum) {
                        (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support udp");
                        return -rte_errno;
                }

                rule->mode = HINIC_FDIR_MODE_TCAM;
                rule->mask.proto_mask = UINT16_MAX;
                rule->mask.src_port_mask = udp_mask->hdr.src_port;
                rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
                if (item->spec) {
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
                        rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
                }
        } else {
                (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter tcam normal, l4 only support icmp, any, tcp, udp");
1074                 return -rte_errno;
1075         }
1076
1077         item = next_no_void_pattern(pattern, item);
1078         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1079                 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1080                 rte_flow_error_set(error, EINVAL,
1081                         RTE_FLOW_ERROR_TYPE_ITEM,
1082                         item, "Not supported by fdir filter tcam normal, support end");
1083                 return -rte_errno;
1084         }
1085
1086         /* get next no void item */
1087         *in_out_item = item;
1088
1089         return 0;
1090 }
1091
1092 static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
1093                                         const struct rte_flow_item pattern[],
1094                                         struct hinic_fdir_rule *rule,
1095                                         struct rte_flow_error *error)
1096 {
1097         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1098                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1099                 hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
1100                 hinic_normal_item_check_end(item, rule, error))
1101                 return -rte_errno;
1102
1103         return 0;
1104 }
1105
1106 static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
1107                                         const struct rte_flow_item pattern[],
1108                                         struct hinic_fdir_rule *rule,
1109                                         struct rte_flow_error *error)
1110 {
1111         const struct rte_flow_item *item = *in_out_item;
1112
1113         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1114                 item = next_no_void_pattern(pattern, item);
1115                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1116                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1117                         rte_flow_error_set(error, EINVAL,
1118                                 RTE_FLOW_ERROR_TYPE_ITEM,
1119                                 item, "Not supported by fdir filter, support vxlan");
1120                         return -rte_errno;
1121                 }
1122
1123                 *in_out_item = item;
1124         } else {
1125                 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1126                 rte_flow_error_set(error, EINVAL,
1127                                 RTE_FLOW_ERROR_TYPE_ITEM,
1128                                 item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
1129                 return -rte_errno;
1130         }
1131
1132         return 0;
1133 }
1134
1135 static int
1136 hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
1137                                 const struct rte_flow_item pattern[],
1138                                 struct hinic_fdir_rule *rule,
1139                                 struct rte_flow_error *error)
1140 {
1141         const struct rte_flow_item *item = *in_out_item;
1142
1143
1144         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
1145                 item = next_no_void_pattern(pattern, item);
1146                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1147                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1148                     item->type != RTE_FLOW_ITEM_TYPE_ANY) {
1149                         (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1150                         rte_flow_error_set(error, EINVAL,
1151                                 RTE_FLOW_ERROR_TYPE_ITEM,
1152                                 item, "Not supported by fdir filter, support tcp/udp");
1153                         return -rte_errno;
1154                 }
1155
1156                 *in_out_item = item;
1157         }
1158
1159         return 0;
1160 }
1161
1162 static int
1163 hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
1164                                 const struct rte_flow_item pattern[],
1165                                 struct hinic_fdir_rule *rule,
1166                                 struct rte_flow_error *error)
1167 {
1168         const struct rte_flow_item_tcp *tcp_spec;
1169         const struct rte_flow_item_tcp *tcp_mask;
1170         const struct rte_flow_item_udp *udp_spec;
1171         const struct rte_flow_item_udp *udp_mask;
1172         const struct rte_flow_item *item = *in_out_item;
1173
1174         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1175                 /* Not supported last point for range */
1176                 if (item->last) {
1177                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
1178                         rte_flow_error_set(error, EINVAL,
1179                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1180                                 item, "Not supported last point for range");
1181                         return -rte_errno;
1182                 }
1183
1184                 /* get the TCP/UDP info */
1185                 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1186                         /*
1187                          * Only care about src & dst ports,
1188                          * others should be masked.
1189                          */
1190                         if (!item->mask) {
1191                                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1192                                 rte_flow_error_set(error, EINVAL,
1193                                         RTE_FLOW_ERROR_TYPE_ITEM,
1194                                         item, "Not supported by fdir filter, support src, dst ports");
1195                                 return -rte_errno;
1196                         }
1197
1198                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1199                         if (tcp_mask->hdr.sent_seq ||
1200                                 tcp_mask->hdr.recv_ack ||
1201                                 tcp_mask->hdr.data_off ||
1202                                 tcp_mask->hdr.tcp_flags ||
1203                                 tcp_mask->hdr.rx_win ||
1204                                 tcp_mask->hdr.cksum ||
1205                                 tcp_mask->hdr.tcp_urp) {
1206                                 (void)memset(rule, 0,
1207                                         sizeof(struct hinic_fdir_rule));
1208                                 rte_flow_error_set(error, EINVAL,
1209                                         RTE_FLOW_ERROR_TYPE_ITEM,
1210                                         item, "Not supported by fdir filter, support tcp");
1211                                 return -rte_errno;
1212                         }
1213
1214                         rule->mode = HINIC_FDIR_MODE_TCAM;
1215                         rule->mask.tunnel_flag = UINT16_MAX;
1216                         rule->mask.tunnel_inner_src_port_mask =
1217                                                         tcp_mask->hdr.src_port;
1218                         rule->mask.tunnel_inner_dst_port_mask =
1219                                                         tcp_mask->hdr.dst_port;
1220                         rule->mask.proto_mask = UINT16_MAX;
1221
1222                         rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
1223                         if (item->spec) {
1224                                 tcp_spec =
1225                                 (const struct rte_flow_item_tcp *)item->spec;
1226                                 rule->hinic_fdir.tunnel_inner_src_port =
1227                                                         tcp_spec->hdr.src_port;
1228                                 rule->hinic_fdir.tunnel_inner_dst_port =
1229                                                         tcp_spec->hdr.dst_port;
1230                         }
1231                 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1232                         /*
1233                          * Only care about src & dst ports,
1234                          * others should be masked.
1235                          */
1236                         if (!item->mask) {
1237                                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1238                                 rte_flow_error_set(error, EINVAL,
1239                                         RTE_FLOW_ERROR_TYPE_ITEM,
1240                                         item, "Not supported by fdir filter, support src, dst ports");
1241                                 return -rte_errno;
1242                         }
1243
1244                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
1245                         if (udp_mask->hdr.dgram_len ||
1246                             udp_mask->hdr.dgram_cksum) {
1247                                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1248                                 rte_flow_error_set(error, EINVAL,
1249                                         RTE_FLOW_ERROR_TYPE_ITEM,
1250                                         item, "Not supported by fdir filter, support udp");
1251                                 return -rte_errno;
1252                         }
1253
1254                         rule->mode = HINIC_FDIR_MODE_TCAM;
1255                         rule->mask.tunnel_flag = UINT16_MAX;
1256                         rule->mask.tunnel_inner_src_port_mask =
1257                                                         udp_mask->hdr.src_port;
1258                         rule->mask.tunnel_inner_dst_port_mask =
1259                                                         udp_mask->hdr.dst_port;
1260                         rule->mask.proto_mask = UINT16_MAX;
1261
1262                         rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1263                         if (item->spec) {
1264                                 udp_spec =
1265                                 (const struct rte_flow_item_udp *)item->spec;
1266                                 rule->hinic_fdir.tunnel_inner_src_port =
1267                                                         udp_spec->hdr.src_port;
1268                                 rule->hinic_fdir.tunnel_inner_dst_port =
1269                                                         udp_spec->hdr.dst_port;
1270                         }
1271                 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
1272                         rule->mode = HINIC_FDIR_MODE_TCAM;
1273                         rule->mask.tunnel_flag = UINT16_MAX;
1274                 } else {
1275                         memset(rule, 0, sizeof(struct hinic_fdir_rule));
1276                         rte_flow_error_set(error, EINVAL,
1277                                 RTE_FLOW_ERROR_TYPE_ITEM,
1278                                 item, "Not supported by fdir filter, only TCP/UDP/ANY items are supported");
1279                         return -rte_errno;
1280                 }
1281
1282                 /* get next no void item */
1283                 *in_out_item = next_no_void_pattern(pattern, item);
1284         }
1285
1286         return 0;
1287 }
1288
1289 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1290                                         const struct rte_flow_item pattern[],
1291                                         struct hinic_fdir_rule *rule,
1292                                         struct rte_flow_error *error)
1293 {
1294         if (hinic_normal_item_check_ether(&item, pattern, error) ||
1295                 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1296                 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1297                 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1298                 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1299                 hinic_normal_item_check_end(item, rule, error))
1300                 return -rte_errno;
1301
1302         return 0;
1303 }
1304
1305 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1306                                         struct hinic_fdir_rule *rule,
1307                                         struct rte_flow_error *error)
1308 {
1309         /* Must be input direction */
1310         if (!attr->ingress) {
1311                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1312                 rte_flow_error_set(error, EINVAL,
1313                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1314                                    attr, "Only support ingress.");
1315                 return -rte_errno;
1316         }
1317
1318         /* Not supported */
1319         if (attr->egress) {
1320                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1321                 rte_flow_error_set(error, EINVAL,
1322                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1323                                    attr, "Not support egress.");
1324                 return -rte_errno;
1325         }
1326
1327         /* Not supported */
1328         if (attr->priority) {
1329                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1330                 rte_flow_error_set(error, EINVAL,
1331                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1332                         attr, "Not support priority.");
1333                 return -rte_errno;
1334         }
1335
1336         return 0;
1337 }
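
/*
 * Illustrative only: the only attribute layout accepted by the checks
 * above is a plain ingress rule with no priority, e.g.
 *
 *	const struct rte_flow_attr attr = { .ingress = 1 };
 *
 * An egress flag or a non-zero priority is rejected with EINVAL.
 */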
1338
1339 static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
1340                                 const struct rte_flow_action actions[],
1341                                 struct hinic_fdir_rule *rule,
1342                                 struct rte_flow_error *error)
1343 {
1344         const struct rte_flow_action *act;
1345
1346         /* Check if the first not void action is QUEUE */
1347         act = next_no_void_action(actions, NULL);
1348         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1349                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1350                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1351                         act, "Not supported action.");
1352                 return -rte_errno;
1353         }
1354
1355         rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1356
1357         /* Check if the next not void item is END */
1358         act = next_no_void_action(actions, act);
1359         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1360                 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1361                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1362                         act, "Not supported action.");
1363                 return -rte_errno;
1364         }
1365
1366         return 0;
1367 }
1368
1369 /**
1370  * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
1371  * and fill in the flow director filter info along the way.
1372  * UDP/TCP/SCTP PATTERN:
1373  * The first not void item can be ETH, IPV4 or IPV6.
1374  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1375  * The next not void item may be UDP or TCP (optional).
1376  * The next not void item must be END.
1377  * ACTION:
1378  * The first not void action should be QUEUE.
1379  * The second not void optional action should be MARK,
1380  * mark_id is a uint32_t number.
1381  * The next not void action should be END.
1382  * UDP/TCP pattern example:
1383  * ITEM           Spec                            Mask
1384  * ETH            NULL                            NULL
1385  * IPV4           src_addr  1.2.3.6               0xFFFFFFFF
1386  *                dst_addr  1.2.3.5               0xFFFFFFFF
1387  * UDP/TCP        src_port  80                    0xFFFF
1388  *                dst_port  80                    0xFFFF
1389  * END
1390  * Other members in mask and spec should be set to 0x00.
1391  * Item->last should be NULL.
1392  */
1393 static int
1394 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1395                                const struct rte_flow_item pattern[],
1396                                const struct rte_flow_action actions[],
1397                                struct hinic_fdir_rule *rule,
1398                                struct rte_flow_error *error)
1399 {
1400         const struct rte_flow_item *item = NULL;
1401
1402         if (hinic_check_filter_arg(attr, pattern, actions, error))
1403                 return -rte_errno;
1404
1405         if (hinic_check_normal_item_ele(item, pattern, rule, error))
1406                 return -rte_errno;
1407
1408         if (hinic_check_normal_attr_ele(attr, rule, error))
1409                 return -rte_errno;
1410
1411         if (hinic_check_normal_act_ele(item, actions, rule, error))
1412                 return -rte_errno;
1413
1414         return 0;
1415 }
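
/*
 * Illustrative only (kept out of the build): a minimal sketch of
 * pattern/actions arrays with the shape documented above (exact-match
 * IPv4 addresses plus TCP ports, steering to a hypothetical queue 3).
 * The ex_* names are made up for this sketch; the structs and the
 * RTE_BE16()/RTE_BE32() helpers are standard rte_flow/rte_byteorder.
 */
#if 0
static const struct rte_flow_item_ipv4 ex_ipv4_spec = {
	.hdr = {
		.src_addr = RTE_BE32(0x01020306), /* 1.2.3.6 */
		.dst_addr = RTE_BE32(0x01020305), /* 1.2.3.5 */
	},
};
static const struct rte_flow_item_ipv4 ex_ipv4_mask = {
	.hdr = {
		.src_addr = RTE_BE32(UINT32_MAX),
		.dst_addr = RTE_BE32(UINT32_MAX),
	},
};
static const struct rte_flow_item_tcp ex_tcp_spec = {
	.hdr = {
		.src_port = RTE_BE16(80),
		.dst_port = RTE_BE16(80),
	},
};
static const struct rte_flow_item_tcp ex_tcp_mask = {
	.hdr = {
		.src_port = RTE_BE16(UINT16_MAX),
		.dst_port = RTE_BE16(UINT16_MAX),
	},
};
static const struct rte_flow_item ex_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &ex_ipv4_spec, .mask = &ex_ipv4_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP,
	  .spec = &ex_tcp_spec, .mask = &ex_tcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue ex_queue = { .index = 3 };
static const struct rte_flow_action ex_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &ex_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif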
1416
1417 /**
1418  * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
1419  * and fill in the flow director filter info along the way.
1420  * UDP/TCP/SCTP PATTERN:
1421  * The first not void item can be ETH, IPV4 or IPV6.
1422  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1423  * The next not void item can be ANY, TCP or UDP.
1424  * ACTION:
1425  * The first not void action should be QUEUE.
1426  * The second not void optional action should be MARK,
1427  * mark_id is a uint32_t number.
1428  * The next not void action should be END.
1429  * UDP/TCP pattern example:
1430  * ITEM           Spec                            Mask
1431  * ETH            NULL                            NULL
1432  * IPV4           src_addr  1.2.3.6               0xFFFFFFFF
1433  *                dst_addr  1.2.3.5               0xFFFFFFFF
1434  * UDP/TCP        src_port  80                    0xFFFF
1435  *                dst_port  80                    0xFFFF
1436  * END
1437  * Other members in mask and spec should be set to 0x00.
1438  * Item->last should be NULL.
1439  */
1440 static int
1441 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1442                                const struct rte_flow_item pattern[],
1443                                const struct rte_flow_action actions[],
1444                                struct hinic_fdir_rule *rule,
1445                                struct rte_flow_error *error)
1446 {
1447         const struct rte_flow_item *item = NULL;
1448
1449         if (hinic_check_filter_arg(attr, pattern, actions, error))
1450                 return -rte_errno;
1451
1452         if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1453                 return -rte_errno;
1454
1455         if (hinic_check_normal_attr_ele(attr, rule, error))
1456                 return -rte_errno;
1457
1458         if (hinic_check_normal_act_ele(item, actions, rule, error))
1459                 return -rte_errno;
1460
1461         return 0;
1462 }
1463
1464 /**
1465  * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
1466  * and fill in the flow director filter info along the way.
1467  * UDP/TCP/SCTP PATTERN:
1468  * The first not void item can be ETH, IPV4 or IPV6.
1469  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1470  * The next not void item must be UDP.
1471  * The next not void item may be VXLAN (optional).
1472  * The first not void inner item can be ETH, IPV4 or IPV6.
1473  * The next not void inner item may be ANY, UDP or TCP (optional).
1474  * The next not void item must be END.
1475  * ACTION:
1476  * The first not void action should be QUEUE.
1477  * The second not void optional action should be MARK,
1478  * mark_id is a uint32_t number.
1479  * The next not void action should be END.
1480  * UDP/TCP pattern example:
1481  * ITEM           Spec                            Mask
1482  * ETH            NULL                            NULL
1483  * IPV4           src_addr  1.2.3.6               0xFFFFFFFF
1484  *                dst_addr  1.2.3.5               0xFFFFFFFF
1485  * UDP            NULL                            NULL
1486  * VXLAN          NULL                            NULL
1487  * UDP/TCP        src_port  80                    0xFFFF
1488  *                dst_port  80                    0xFFFF
1489  * END
1490  * Other members in mask and spec should be set to 0x00.
1491  * Item->last should be NULL.
1492  */
1493 static int
1494 hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
1495                                const struct rte_flow_item pattern[],
1496                                const struct rte_flow_action actions[],
1497                                struct hinic_fdir_rule *rule,
1498                                struct rte_flow_error *error)
1499 {
1500         const struct rte_flow_item *item = NULL;
1501
1502         if (hinic_check_filter_arg(attr, pattern, actions, error))
1503                 return -rte_errno;
1504
1505         if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1506                 return -rte_errno;
1507
1508         if (hinic_check_normal_attr_ele(attr, rule, error))
1509                 return -rte_errno;
1510
1511         if (hinic_check_normal_act_ele(item, actions, rule, error))
1512                 return -rte_errno;
1513
1514         return 0;
1515 }
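
/*
 * Illustrative only: the tunnel parser above expects the outer
 * ETH/IPV4/UDP/VXLAN sequence followed by the inner L4 item carrying
 * the port spec/mask (reusing the hypothetical ex_tcp_spec/ex_tcp_mask
 * sketch shown earlier):
 */
#if 0
static const struct rte_flow_item ex_tunnel_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer IP, addrs optional */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* outer UDP, NULL spec/mask */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* NULL spec/mask */
	{ .type = RTE_FLOW_ITEM_TYPE_TCP,	/* inner L4: TCP, UDP or ANY */
	  .spec = &ex_tcp_spec, .mask = &ex_tcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif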
1516
1517 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1518                         const struct rte_flow_attr *attr,
1519                         const struct rte_flow_item pattern[],
1520                         const struct rte_flow_action actions[],
1521                         struct hinic_fdir_rule *rule,
1522                         struct rte_flow_error *error)
1523 {
1524         int ret;
1525
1526         ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1527                                                 rule, error);
1528         if (!ret)
1529                 goto step_next;
1530
1531         ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1532                                                 rule, error);
1533         if (!ret)
1534                 goto step_next;
1535
1536         ret = hinic_parse_fdir_filter_tcam_tunnel(attr, pattern, actions,
1537                                                 rule, error);
1538         if (ret)
1539                 return ret;
1540
1541 step_next:
1542         if (rule->queue >= dev->data->nb_rx_queues)
1543                 return -ENOTSUP;
1544
1545         return ret;
1546 }
1547
1548 /**
1549  * Check if the flow rule is supported by the NIC.
1550  * It only checks the format; it does not guarantee that the rule can be
1551  * programmed into the HW, because there may not be enough room for it.
1552  */
1553 static int hinic_flow_validate(struct rte_eth_dev *dev,
1554                                 const struct rte_flow_attr *attr,
1555                                 const struct rte_flow_item pattern[],
1556                                 const struct rte_flow_action actions[],
1557                                 struct rte_flow_error *error)
1558 {
1559         struct rte_eth_ethertype_filter ethertype_filter;
1560         struct rte_eth_ntuple_filter ntuple_filter;
1561         struct hinic_fdir_rule fdir_rule;
1562         int ret;
1563
1564         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1565         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1566                                 actions, &ntuple_filter, error);
1567         if (!ret)
1568                 return 0;
1569
1570         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1571         ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1572                                 actions, &ethertype_filter, error);
1573
1574         if (!ret)
1575                 return 0;
1576
1577         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1578         ret = hinic_parse_fdir_filter(dev, attr, pattern,
1579                                 actions, &fdir_rule, error);
1580
1581         return ret;
1582 }
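
/*
 * Illustrative only: applications reach this validation path through
 * the generic rte_flow API, typically validating before creating the
 * flow (port_id and the ex_* arrays are hypothetical):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, ex_pattern,
 *			      ex_actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, ex_pattern,
 *				       ex_actions, &err);
 */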
1583
1584 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1585                  struct hinic_5tuple_filter_info *hinic_filter_info)
1586 {
1587         switch (filter->dst_ip_mask) {
1588         case UINT32_MAX:
1589                 hinic_filter_info->dst_ip_mask = 0;
1590                 hinic_filter_info->dst_ip = filter->dst_ip;
1591                 break;
1592         case 0:
1593                 hinic_filter_info->dst_ip_mask = 1;
1594                 hinic_filter_info->dst_ip = 0;
1595                 break;
1596         default:
1597                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1598                 return -EINVAL;
1599         }
1600
1601         switch (filter->src_ip_mask) {
1602         case UINT32_MAX:
1603                 hinic_filter_info->src_ip_mask = 0;
1604                 hinic_filter_info->src_ip = filter->src_ip;
1605                 break;
1606         case 0:
1607                 hinic_filter_info->src_ip_mask = 1;
1608                 hinic_filter_info->src_ip = 0;
1609                 break;
1610         default:
1611                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1612                 return -EINVAL;
1613         }
1614         return 0;
1615 }
1616
1617 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1618                    struct hinic_5tuple_filter_info *hinic_filter_info)
1619 {
1620         switch (filter->dst_port_mask) {
1621         case UINT16_MAX:
1622                 hinic_filter_info->dst_port_mask = 0;
1623                 hinic_filter_info->dst_port = filter->dst_port;
1624                 break;
1625         case 0:
1626                 hinic_filter_info->dst_port_mask = 1;
1627                 hinic_filter_info->dst_port = 0;
1628                 break;
1629         default:
1630                 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1631                 return -EINVAL;
1632         }
1633
1634         switch (filter->src_port_mask) {
1635         case UINT16_MAX:
1636                 hinic_filter_info->src_port_mask = 0;
1637                 hinic_filter_info->src_port = filter->src_port;
1638                 break;
1639         case 0:
1640                 hinic_filter_info->src_port_mask = 1;
1641                 hinic_filter_info->src_port = 0;
1642                 break;
1643         default:
1644                 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1645                 return -EINVAL;
1646         }
1647
1648         return 0;
1649 }
1650
1651 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1652                     struct hinic_5tuple_filter_info *hinic_filter_info)
1653 {
1654         switch (filter->proto_mask) {
1655         case UINT8_MAX:
1656                 hinic_filter_info->proto_mask = 0;
1657                 hinic_filter_info->proto = filter->proto;
1658                 break;
1659         case 0:
1660                 hinic_filter_info->proto_mask = 1;
1661                 hinic_filter_info->proto = 0;
1662                 break;
1663         default:
1664                 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1665                 return -EINVAL;
1666         }
1667
1668         return 0;
1669 }
1670
1671 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1672                         struct hinic_5tuple_filter_info *filter_info)
1673 {
1674         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1675                 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1676                 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1677                 return -EINVAL;
1678
1679         if (ntuple_ip_filter(filter, filter_info) ||
1680                 ntuple_port_filter(filter, filter_info) ||
1681                 ntuple_proto_filter(filter, filter_info))
1682                 return -EINVAL;
1683
1684         filter_info->priority = (uint8_t)filter->priority;
1685         return 0;
1686 }
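
/*
 * A note on the mask conventions above (a reading of the code, not new
 * behavior): in rte_eth_ntuple_filter a mask of UINT32_MAX/UINT16_MAX/
 * UINT8_MAX means "compare this field" and 0 means "ignore it", while
 * the hinic_5tuple_filter_info *_mask fields are inverted enable bits
 * (0 = compare, 1 = wildcard). So dst_ip_mask == UINT32_MAX on input
 * becomes dst_ip_mask == 0 with dst_ip copied through, and any partial
 * mask is rejected with -EINVAL.
 */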
1687
1688 static inline struct hinic_5tuple_filter *
1689 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1690                            struct hinic_5tuple_filter_info *key)
1691 {
1692         struct hinic_5tuple_filter *it;
1693
1694         TAILQ_FOREACH(it, filter_list, entries) {
1695                 if (memcmp(key, &it->filter_info,
1696                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1697                         return it;
1698                 }
1699         }
1700
1701         return NULL;
1702 }
1703
1704 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1705 {
1706         struct tag_pa_rule lacp_rule;
1707         struct tag_pa_action lacp_action;
1708
1709         memset(&lacp_rule, 0, sizeof(lacp_rule));
1710         memset(&lacp_action, 0, sizeof(lacp_action));
1711         /* LACP TCAM rule */
1712         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1713         lacp_rule.l2_header.eth_type.val16 = 0x8809;
1714         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1715
1716         /* LACP TCAM action */
1717         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1718         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1719         lacp_action.pkt_type = PKT_LACP_TYPE;
1720         lacp_action.pri = 0x0;
1721         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1722
1723         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1724                                         &lacp_rule, &lacp_action);
1725 }
1726
1727 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1728 {
1729         struct tag_pa_rule bgp_rule;
1730         struct tag_pa_action bgp_action;
1731
1732         memset(&bgp_rule, 0, sizeof(bgp_rule));
1733         memset(&bgp_action, 0, sizeof(bgp_action));
1734         /* BGP TCAM rule */
1735         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1736         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1737         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1738         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1739         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1740         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1741
1742         /* BGP TCAM action */
1743         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1744         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1745         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1746         bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the
1747                                * ipsu parse result; no need to convert
1748                                */
1749         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1750
1751         return hinic_set_fdir_tcam(nic_dev->hwdev,
1752                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1753 }
1754
1755 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1756 {
1757         struct tag_pa_rule bgp_rule;
1758         struct tag_pa_action bgp_action;
1759
1760         memset(&bgp_rule, 0, sizeof(bgp_rule));
1761         memset(&bgp_action, 0, sizeof(bgp_action));
1762         /* BGP TCAM rule */
1763         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1764         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1765         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1766         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1767         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1768         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1769
1770         /* BGP TCAM action */
1771         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1772         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1773         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1774         bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the
1775                                * ipsu parse result; no need to convert
1776                                */
1777         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1778
1779         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1780                                         &bgp_rule, &bgp_action);
1781 }
1782
1783 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1784 {
1785         struct tag_pa_rule vrrp_rule;
1786         struct tag_pa_action vrrp_action;
1787
1788         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1789         memset(&vrrp_action, 0, sizeof(vrrp_action));
1790         /* VRRP TCAM rule */
1791         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1792         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1793         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1794         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1795
1796         /* VRRP TCAM action */
1797         vrrp_action.err_type = 0x3f;
1798         vrrp_action.fwd_action = 0x7;
1799         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1800         vrrp_action.pri = 0xf;
1801         vrrp_action.push_len = 0xf;
1802
1803         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1804                                         &vrrp_rule, &vrrp_action);
1805 }
1806
1807 /**
1808  * Clear all fdir configuration.
1809  *
1810  * @param nic_dev
1811  *   The hardware interface of an Ethernet device.
1812  */
1817 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1818 {
1819         (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1820
1821         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1822
1823         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1824
1825         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1826
1827         (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1828
1829         (void)hinic_flush_tcam_rule(nic_dev->hwdev);
1830 }
1831
1832 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1833                        struct hinic_filter_info *filter_info)
1834 {
1835         switch (filter->filter_info.proto) {
1836         case IPPROTO_TCP:
1837                 /* Filter type is bgp type if dst_port or src_port is 179 */
1838                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1839                         !(filter->filter_info.dst_port_mask)) {
1840                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1841                 } else if (filter->filter_info.src_port ==
1842                         RTE_BE16(BGP_DPORT_ID) &&
1843                         !(filter->filter_info.src_port_mask)) {
1844                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1845                 } else {
1846                         PMD_DRV_LOG(INFO, "TCP protocol: 5-tuple filters"
1847                         " only support BGP now, proto:0x%x, "
1848                         "dst_port:0x%x, dst_port_mask:0x%x."
1849                         "src_port:0x%x, src_port_mask:0x%x.",
1850                         filter->filter_info.proto,
1851                         filter->filter_info.dst_port,
1852                         filter->filter_info.dst_port_mask,
1853                         filter->filter_info.src_port,
1854                         filter->filter_info.src_port_mask);
1855                         return -EINVAL;
1856                 }
1857                 break;
1858
1859         case IPPROTO_VRRP:
1860                 filter_info->pkt_type = PKT_VRRP_TYPE;
1861                 break;
1862
1863         case IPPROTO_ICMP:
1864                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1865                 break;
1866
1867         case IPPROTO_ICMPV6:
1868                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1869                 break;
1870
1871         default:
1872                 PMD_DRV_LOG(ERR, "5-tuple filters only support BGP/VRRP/ICMP now, "
1873                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1874                 "src_port: 0x%x, src_port_mask: 0x%x.",
1875                 filter->filter_info.proto, filter->filter_info.dst_port,
1876                 filter->filter_info.dst_port_mask,
1877                 filter->filter_info.src_port,
1878                 filter->filter_info.src_port_mask);
1879                 return -EINVAL;
1880         }
1881
1882         return 0;
1883 }
1884
1885 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1886                         struct hinic_filter_info *filter_info, int *index)
1887 {
1888         int type_id;
1889
1890         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1891
1892         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1893                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter types.");
1894                 return -EINVAL;
1895         }
1896
1897         if (!(filter_info->type_mask & (1 << type_id))) {
1898                 filter_info->type_mask |= 1 << type_id;
1899                 filter->index = type_id;
1900                 filter_info->pkt_filters[type_id].enable = true;
1901                 filter_info->pkt_filters[type_id].pkt_proto =
1902                                                 filter->filter_info.proto;
1903                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1904                                   filter, entries);
1905         } else {
1906                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1907                 return -EIO;
1908         }
1909
1910         *index = type_id;
1911         return 0;
1912 }
1913
1914 /*
1915  * Add a 5tuple filter
1916  *
1917  * @param dev
1918  *  Pointer to struct rte_eth_dev.
1919  * @param filter
1920  *  Pointer to the filter that will be added.
1921  * @return
1922  *    - On success, zero.
1923  *    - On failure, a negative value.
1924  */
1925 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
1926                                 struct hinic_5tuple_filter *filter)
1927 {
1928         struct hinic_filter_info *filter_info =
1929                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1930         int i, ret_fw;
1931         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1932
1933         if (hinic_filter_info_init(filter, filter_info) ||
1934                 hinic_lookup_new_filter(filter, filter_info, &i))
1935                 return -EFAULT;
1936
1937         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1938                                         filter_info->qid,
1939                                         filter_info->pkt_filters[i].enable,
1940                                         true);
1941         if (ret_fw) {
1942                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1943                         filter_info->pkt_type, filter->queue,
1944                         filter_info->pkt_filters[i].enable);
1945                 return -EFAULT;
1946         }
1947
1948         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1949                         filter_info->pkt_type, filter_info->qid,
1950                         filter_info->pkt_filters[filter->index].enable);
1951
1952         switch (filter->filter_info.proto) {
1953         case IPPROTO_TCP:
1954                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
1955                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
1956                         if (ret_fw) {
1957                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
1958                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1959                                         filter_info->pkt_type, filter->queue,
1960                                         filter_info->pkt_filters[i].enable);
1961                                 return -EFAULT;
1962                         }
1963
1964                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
1965                                 filter->queue,
1966                                 filter_info->pkt_filters[i].enable);
1967                 } else if (filter->filter_info.src_port ==
1968                         RTE_BE16(BGP_DPORT_ID)) {
1969                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
1970                         if (ret_fw) {
1971                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
1972                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1973                                         filter_info->pkt_type, filter->queue,
1974                                         filter_info->pkt_filters[i].enable);
1975                                 return -EFAULT;
1976                         }
1977
1978                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
1979                                         filter->queue,
1980                                         filter_info->pkt_filters[i].enable);
1981                 }
1982
1983                 break;
1984
1985         case IPPROTO_VRRP:
1986                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
1987                 if (ret_fw) {
1988                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
1989                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1990                                 filter_info->pkt_type, filter->queue,
1991                                 filter_info->pkt_filters[i].enable);
1992                         return -EFAULT;
1993                 }
1994                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
1995                                 filter->queue,
1996                                 filter_info->pkt_filters[i].enable);
1997                 break;
1998
1999         default:
2000                 break;
2001         }
2002
2003         return 0;
2004 }
2005
2006 /*
2007  * Remove a 5tuple filter
2008  *
2009  * @param dev
2010  *  Pointer to struct rte_eth_dev.
2011  * @param filter
2012  *  Pointer to the filter that will be removed.
2013  */
2014 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2015                            struct hinic_5tuple_filter *filter)
2016 {
2017         struct hinic_filter_info *filter_info =
2018                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2019         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2020
2021         switch (filter->filter_info.proto) {
2022         case IPPROTO_VRRP:
2023                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2024                 break;
2025
2026         case IPPROTO_TCP:
2027                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2028                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2029                                                         TCAM_PKT_BGP_DPORT);
2030                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2031                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2032                                                         TCAM_PKT_BGP_SPORT);
2033                 break;
2034
2035         default:
2036                 break;
2037         }
2038
2039         hinic_filter_info_init(filter, filter_info);
2040
2041         filter_info->pkt_filters[filter->index].enable = false;
2042         filter_info->pkt_filters[filter->index].pkt_proto = 0;
2043
2044         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2045                 filter_info->pkt_type,
2046                 filter_info->pkt_filters[filter->index].qid,
2047                 filter_info->pkt_filters[filter->index].enable);
2048         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2049                                 filter_info->pkt_filters[filter->index].qid,
2050                                 filter_info->pkt_filters[filter->index].enable,
2051                                 true);
2052
2053         filter_info->pkt_type = 0;
2054         filter_info->qid = 0;
2055         filter_info->pkt_filters[filter->index].qid = 0;
2056         filter_info->type_mask &= ~(1 << filter->index);
2057         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2058
2059         rte_free(filter);
2060 }
2061
2062 /*
2063  * Add or delete a ntuple filter
2064  *
2065  * @param dev
2066  *  Pointer to struct rte_eth_dev.
2067  * @param ntuple_filter
2068  *  Pointer to struct rte_eth_ntuple_filter
2069  * @param add
2070  *  If true, add filter; if false, remove filter
2071  * @return
2072  *    - On success, zero.
2073  *    - On failure, a negative value.
2074  */
2075 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2076                                 struct rte_eth_ntuple_filter *ntuple_filter,
2077                                 bool add)
2078 {
2079         struct hinic_filter_info *filter_info =
2080                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2081         struct hinic_5tuple_filter_info filter_5tuple;
2082         struct hinic_5tuple_filter *filter;
2083         int ret;
2084
2085         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2086                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2087                 return -EINVAL;
2088         }
2089
2090         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2091         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2092         if (ret < 0)
2093                 return ret;
2094
2095         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2096                                          &filter_5tuple);
2097         if (filter != NULL && add) {
2098                 PMD_DRV_LOG(ERR, "Filter exists.");
2099                 return -EEXIST;
2100         }
2101         if (filter == NULL && !add) {
2102                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2103                 return -ENOENT;
2104         }
2105
2106         if (add) {
2107                 filter = rte_zmalloc("hinic_5tuple_filter",
2108                                 sizeof(struct hinic_5tuple_filter), 0);
2109                 if (filter == NULL)
2110                         return -ENOMEM;
2111                 rte_memcpy(&filter->filter_info, &filter_5tuple,
2112                                 sizeof(struct hinic_5tuple_filter_info));
2113                 filter->queue = ntuple_filter->queue;
2114
2115                 filter_info->qid = ntuple_filter->queue;
2116
2117                 ret = hinic_add_5tuple_filter(dev, filter);
2118                 if (ret)
2119                         rte_free(filter);
2120
2121                 return ret;
2122         }
2123
2124         hinic_remove_5tuple_filter(dev, filter);
2125
2126         return 0;
2127 }
2128
2129 static inline int
2130 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2131 {
2132         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2133                 return -EINVAL;
2134
2135         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2136                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2137                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2138                         " ethertype filter", filter->ether_type);
2139                 return -EINVAL;
2140         }
2141
2142         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2143                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2144                 return -EINVAL;
2145         }
2146         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2147                 PMD_DRV_LOG(ERR, "Drop option is not supported");
2148                 return -EINVAL;
2149         }
2150
2151         return 0;
2152 }
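
/*
 * Illustrative only: a filter that passes the checks above under the
 * legacy ethertype filter API (the queue value is hypothetical):
 *
 *	struct rte_eth_ethertype_filter f = {
 *		.ether_type = RTE_ETHER_TYPE_SLOW,	(LACP, 0x8809)
 *		.flags = 0,				(no MAC/DROP flags)
 *		.queue = 0,
 *	};
 */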
2153
2154 static inline int
2155 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2156                               struct hinic_pkt_filter *ethertype_filter)
2157 {
2158         switch (ethertype_filter->pkt_proto) {
2159         case RTE_ETHER_TYPE_SLOW:
2160                 filter_info->pkt_type = PKT_LACP_TYPE;
2161                 break;
2162
2163         case RTE_ETHER_TYPE_ARP:
2164                 filter_info->pkt_type = PKT_ARP_TYPE;
2165                 break;
2166
2167         default:
2168                 PMD_DRV_LOG(ERR, "Only LACP/ARP are supported for ethertype filters");
2169                 return -EIO;
2170         }
2171
2172         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2173 }
2174
2175 static inline int
2176 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2177                               struct hinic_pkt_filter *ethertype_filter)
2178 {
2179         int id;
2180
2181         /* Find LACP or VRRP type id */
2182         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2183         if (id < 0)
2184                 return -EINVAL;
2185
2186         if (!(filter_info->type_mask & (1 << id))) {
2187                 filter_info->type_mask |= 1 << id;
2188                 filter_info->pkt_filters[id].pkt_proto =
2189                         ethertype_filter->pkt_proto;
2190                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2191                 filter_info->qid = ethertype_filter->qid;
2192                 return id;
2193         }
2194
2195         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2196         return -EINVAL;
2197 }
2198
2199 static inline void
2200 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2201                               uint8_t idx)
2202 {
2203         if (idx >= HINIC_MAX_Q_FILTERS)
2204                 return;
2205
2206         filter_info->pkt_type = 0;
2207         filter_info->type_mask &= ~(1 << idx);
2208         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2209         filter_info->pkt_filters[idx].enable = FALSE;
2210         filter_info->pkt_filters[idx].qid = 0;
2211 }
2212
2213 static inline int
2214 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2215                                struct rte_eth_ethertype_filter *filter,
2216                                bool add)
2217 {
2218         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2219         struct hinic_filter_info *filter_info =
2220                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2221         struct hinic_pkt_filter ethertype_filter;
2222         int i;
2223         int ret_fw;
2224
2225         if (hinic_check_ethertype_filter(filter))
2226                 return -EINVAL;
2227
2228         if (add) {
2229                 ethertype_filter.pkt_proto = filter->ether_type;
2230                 ethertype_filter.enable = TRUE;
2231                 ethertype_filter.qid = (u8)filter->queue;
2232                 i = hinic_ethertype_filter_insert(filter_info,
2233                                                     &ethertype_filter);
2234                 if (i < 0)
2235                         return -ENOSPC;
2236
2237                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2238                                 filter_info->pkt_type, filter_info->qid,
2239                                 filter_info->pkt_filters[i].enable, true);
2240                 if (ret_fw) {
2241                         PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2242                                 filter_info->pkt_type, filter->queue,
2243                                 filter_info->pkt_filters[i].enable);
2244
2245                         hinic_ethertype_filter_remove(filter_info, i);
2246                         return -ENOENT;
2247                 }
2248                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2249                                 filter_info->pkt_type, filter->queue,
2250                                 filter_info->pkt_filters[i].enable);
2251
2252                 switch (ethertype_filter.pkt_proto) {
2253                 case RTE_ETHER_TYPE_SLOW:
2254                         ret_fw = hinic_set_lacp_tcam(nic_dev);
2255                         if (ret_fw) {
2256                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2257                                 hinic_ethertype_filter_remove(filter_info, i);
2258                                 return -ENOENT;
2259                         }
2260
2261                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2262                         break;
2263                 default:
2264                         break;
2265                 }
2266         } else {
2267                 ethertype_filter.pkt_proto = filter->ether_type;
2268                 i = hinic_ethertype_filter_lookup(filter_info,
2269                                                 &ethertype_filter);
2270
2271                 if ((filter_info->type_mask & (1 << i))) {
2272                         filter_info->pkt_filters[i].enable = FALSE;
2273                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
2274                                         filter_info->pkt_type,
2275                                         filter_info->pkt_filters[i].qid,
2276                                         filter_info->pkt_filters[i].enable,
2277                                         true);
2278
2279                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2280                                         filter_info->pkt_type,
2281                                         filter_info->pkt_filters[i].qid,
2282                                         filter_info->pkt_filters[i].enable);
2283
2284                         switch (ethertype_filter.pkt_proto) {
2285                         case RTE_ETHER_TYPE_SLOW:
2286                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2287                                                                 TCAM_PKT_LACP);
2288                                 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2289                                 break;
2290                         default:
2291                                 break;
2292                         }
2293
2294                         hinic_ethertype_filter_remove(filter_info, i);
2295
2296                 } else {
2297                         PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2298                                         filter_info->pkt_type, filter->queue,
2299                                         filter_info->pkt_filters[i].enable);
2300                         return -ENOENT;
2301                 }
2302         }
2303
2304         return 0;
2305 }
2306
2307 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2308                                 struct hinic_fdir_info *fdir_info)
2309 {
2310         switch (rule->mask.src_ipv4_mask) {
2311         case UINT32_MAX:
2312                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2313                 fdir_info->qid = rule->queue;
2314                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2315                 return 0;
2316
2317         case 0:
2318                 break;
2319
2320         default:
2321                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2322                 return -EINVAL;
2323         }
2324
2325         switch (rule->mask.dst_ipv4_mask) {
2326         case UINT32_MAX:
2327                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2328                 fdir_info->qid = rule->queue;
2329                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2330                 return 0;
2331
2332         case 0:
2333                 break;
2334
2335         default:
2336                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2337                 return -EINVAL;
2338         }
2339
2340         if (fdir_info->fdir_flag == 0) {
2341                 PMD_DRV_LOG(ERR, "Neither src_ip nor dst_ip mask is set.");
2342                 return -EINVAL;
2343         }
2344
2345         return 0;
2346 }
2347
2348 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2349                                         struct hinic_fdir_rule *rule, bool add)
2350 {
2351         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2352         struct hinic_fdir_info fdir_info;
2353         int ret;
2354
2355         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2356
2357         ret = hinic_fdir_info_init(rule, &fdir_info);
2358         if (ret) {
2359                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2360                 return ret;
2361         }
2362
2363         if (add) {
2364                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2365                                                 true, fdir_info.fdir_key,
2366                                                 true, fdir_info.fdir_flag);
2367                 if (ret) {
2368                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2369                                         fdir_info.fdir_flag, fdir_info.qid,
2370                                         fdir_info.fdir_key);
2371                         return -ENOENT;
2372                 }
2373                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2374                                 fdir_info.fdir_flag, fdir_info.qid,
2375                                 fdir_info.fdir_key);
2376         } else {
2377                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2378                                                 false, fdir_info.fdir_key, true,
2379                                                 fdir_info.fdir_flag);
2380                 if (ret) {
2381                         PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2382                                 fdir_info.fdir_flag, fdir_info.qid,
2383                                 fdir_info.fdir_key);
2384                         return -ENOENT;
2385                 }
2386                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2387                                 fdir_info.fdir_flag, fdir_info.qid,
2388                                 fdir_info.fdir_key);
2389         }
2390
2391         return 0;
2392 }
2393
2394 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2395 {
2396         u8 idx;
2397
2398         for (idx = 0; idx < len; idx++)
2399                 key_y[idx] = src_input[idx] & mask[idx];
2400 }
2401
2402 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2403 {
2404         u8 idx;
2405
2406         for (idx = 0; idx < len; idx++)
2407                 key_x[idx] = key_y[idx] ^ mask[idx];
2408 }
2409
2410 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2411                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2412 {
2413         tcam_translate_key_y(fdir_tcam_rule->key.y,
2414                 (u8 *)(&tcam_key->key_info),
2415                 (u8 *)(&tcam_key->key_mask),
2416                 TCAM_FLOW_KEY_SIZE);
2417         tcam_translate_key_x(fdir_tcam_rule->key.x,
2418                 fdir_tcam_rule->key.y,
2419                 (u8 *)(&tcam_key->key_mask),
2420                 TCAM_FLOW_KEY_SIZE);
2421 }
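
/*
 * Worked example of the (x, y) key encoding above, one byte for
 * clarity (a sketch, not driver logic):
 *
 *	src_input = 0xAB, mask = 0xF0
 *	key_y = 0xAB & 0xF0 = 0xA0
 *	key_x = 0xA0 ^ 0xF0 = 0x50
 *
 * Bits where the mask is 0 end up with x == y == 0, which the TCAM
 * treats as "don't care"; bits where the mask is 1 carry the value in
 * y and its complement in x.
 */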
2422
2423 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2424                                 struct hinic_fdir_rule *rule,
2425                                 struct tag_tcam_key *tcam_key,
2426                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2427 {
2428         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2429
2430         switch (rule->mask.dst_ipv4_mask) {
2431         case UINT32_MAX:
2432                 tcam_key->key_info.ext_dip_h =
2433                         (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2434                 tcam_key->key_info.ext_dip_l =
2435                         rule->hinic_fdir.dst_ip & 0xffffU;
2436                 tcam_key->key_mask.ext_dip_h =
2437                         (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2438                 tcam_key->key_mask.ext_dip_l =
2439                         rule->mask.dst_ipv4_mask & 0xffffU;
2440                 break;
2441
2442         case 0:
2443                 break;
2444
2445         default:
2446                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2447                 return -EINVAL;
2448         }
2449
2450         if (rule->mask.dst_port_mask > 0) {
2451                 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2452                 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2453         }
2454
2455         if (rule->mask.src_port_mask > 0) {
2456                 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2457                 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2458         }
2459
2460         switch (rule->mask.tunnel_flag) {
2461         case UINT16_MAX:
2462                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2463                 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2464                 break;
2465
2466         case 0:
2467                 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2468                 tcam_key->key_mask.tunnel_flag = 0;
2469                 break;
2470
2471         default:
2472                 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2473                 return -EINVAL;
2474         }
2475
2476         if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2477                 tcam_key->key_info.dst_port =
2478                                         rule->hinic_fdir.tunnel_inner_dst_port;
2479                 tcam_key->key_mask.dst_port =
2480                                         rule->mask.tunnel_inner_dst_port_mask;
2481         }
2482
2483         if (rule->mask.tunnel_inner_src_port_mask > 0) {
2484                 tcam_key->key_info.src_port =
2485                                         rule->hinic_fdir.tunnel_inner_src_port;
2486                 tcam_key->key_mask.src_port =
2487                                         rule->mask.tunnel_inner_src_port_mask;
2488         }
2489
2490         switch (rule->mask.proto_mask) {
2491         case UINT16_MAX:
2492                 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2493                 tcam_key->key_mask.protocol = UINT8_MAX;
2494                 break;
2495
2496         case 0:
2497                 break;
2498
2499         default:
2500                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2501                 return -EINVAL;
2502         }
2503
2504         tcam_key->key_mask.function_id = UINT16_MAX;
2505
2506         tcam_key->key_info.function_id = hinic_global_func_id(nic_dev->hwdev);
2507
2508         fdir_tcam_rule->data.qid = rule->queue;
2509
2510         tcam_key_calculate(tcam_key, fdir_tcam_rule);
2511
2512         return 0;
2513 }
2514
2515 static inline struct hinic_tcam_filter *
2516 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2517                         struct tag_tcam_key *key)
2518 {
2519         struct hinic_tcam_filter *it;
2520
2521         TAILQ_FOREACH(it, filter_list, entries) {
2522                 if (memcmp(key, &it->tcam_key,
2523                         sizeof(struct tag_tcam_key)) == 0) {
2524                         return it;
2525                 }
2526         }
2527
2528         return NULL;
2529 }
2530
2531 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2532                                         struct hinic_tcam_info *tcam_info,
2533                                         struct hinic_tcam_filter *tcam_filter,
2534                                         u16 *tcam_index)
2535 {
2536         int index;
2537         int max_index;
2538         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2539
2540         if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2541                 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2542         else
2543                 max_index = HINIC_PF_MAX_TCAM_FILTERS;
2544
2545         for (index = 0; index < max_index; index++) {
2546                 if (tcam_info->tcam_index_array[index] == 0)
2547                         break;
2548         }
2549
2550         if (index == max_index) {
2551                 PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
2552                         hinic_global_func_id(nic_dev->hwdev), max_index);
2553                 return -EINVAL;
2554         }
2555
2556         tcam_filter->index = index;
2557         *tcam_index = index;
2558
2559         return 0;
2560 }
2561
2562 static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
2563                                 struct hinic_tcam_filter *tcam_filter,
2564                                 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2565 {
2566         struct hinic_tcam_info *tcam_info =
2567                 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2568         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2569         u16 index = 0;
2570         u16 tcam_block_index = 0;
2571         int rc;
2572
2573         if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
2574                 return -EINVAL;
2575
2576         if (tcam_info->tcam_rule_nums == 0) {
2577                 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2578                         rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2579                                 HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
2580                         if (rc != 0) {
2581                                 PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
2582                                 return -EFAULT;
2583                         }
2584                 } else {
2585                         rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2586                                 HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
2587                         if (rc != 0) {
2588                                 PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
                                return -EFAULT;
                        }
                }

                tcam_info->tcam_block_index = tcam_block_index;
        } else {
                tcam_block_index = tcam_info->tcam_block_index;
        }

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                fdir_tcam_rule->index =
                        HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
        } else {
                fdir_tcam_rule->index =
                        tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
        }
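        /*
         * Illustrative arithmetic with hypothetical sizes: if a PF TCAM
         * block held 16 slots, block 2 with free slot 5 would map to
         * absolute rule index 2 * 16 + 5 = 37.
         */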

        rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
        if (rc != 0) {
                PMD_DRV_LOG(ERR, "Failed to add fdir TCAM rule!");
                return -EFAULT;
        }

        PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x, "
                "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
                hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
                fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
                tcam_info->tcam_rule_nums + 1);

        if (tcam_info->tcam_rule_nums == 0) {
                rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
                if (rc < 0) {
                        (void)hinic_del_tcam_rule(nic_dev->hwdev,
                                                fdir_tcam_rule->index);
                        return rc;
                }
        }

        TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);

        tcam_info->tcam_index_array[index] = 1;
        tcam_info->tcam_rule_nums++;

        return 0;
}

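/*
 * Mirror of hinic_add_tcam_filter(): delete the hardware rule, drop the
 * software bookkeeping, and free the TCAM block once the last rule of
 * this function is gone.
 */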
static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
                                struct hinic_tcam_filter *tcam_filter)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        u32 index = 0;
        u16 tcam_block_index = tcam_info->tcam_block_index;
        int rc;
        u8 block_type = 0;

        if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
                index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
                        tcam_filter->index;
                block_type = HINIC_TCAM_BLOCK_TYPE_VF;
        } else {
                index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
                        tcam_filter->index;
                block_type = HINIC_TCAM_BLOCK_TYPE_PF;
        }

        rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
        if (rc != 0) {
                PMD_DRV_LOG(ERR, "Failed to del fdir TCAM rule!");
                return -EFAULT;
        }

        PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
                "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
                hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
                tcam_info->tcam_rule_nums - 1);

        TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);

        tcam_info->tcam_index_array[tcam_filter->index] = 0;

        rte_free(tcam_filter);

        tcam_info->tcam_rule_nums--;

        if (tcam_info->tcam_rule_nums == 0) {
                (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
                                        &tcam_block_index);
        }

        return 0;
}

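/*
 * Common entry point for TCAM-mode fdir rules: build the key, reject
 * duplicate adds and deletes of unknown rules via the list lookup, then
 * dispatch to hinic_add_tcam_filter() or hinic_del_tcam_filter().
 */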
static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
                                        struct hinic_fdir_rule *rule, bool add)
{
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_tcam_filter *tcam_filter;
        struct tag_tcam_cfg_rule fdir_tcam_rule;
        struct tag_tcam_key tcam_key;
        int ret;

        memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
        memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));

        ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
        if (ret) {
                PMD_DRV_LOG(ERR, "Init fdir TCAM info failed!");
                return ret;
        }

        tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
                                                &tcam_key);
        if (tcam_filter != NULL && add) {
                PMD_DRV_LOG(ERR, "Filter exists.");
                return -EEXIST;
        }
        if (tcam_filter == NULL && !add) {
                PMD_DRV_LOG(ERR, "Filter doesn't exist.");
                return -ENOENT;
        }

        if (add) {
                tcam_filter = rte_zmalloc("hinic_tcam_filter",
                                sizeof(struct hinic_tcam_filter), 0);
                if (tcam_filter == NULL)
                        return -ENOMEM;
                (void)rte_memcpy(&tcam_filter->tcam_key,
                                 &tcam_key, sizeof(struct tag_tcam_key));
                tcam_filter->queue = fdir_tcam_rule.data.qid;

                ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
                if (ret < 0) {
                        rte_free(tcam_filter);
                        return ret;
                }

                rule->tcam_index = fdir_tcam_rule.index;
        } else {
                PMD_DRV_LOG(INFO, "Begin to del TCAM filter");
                ret = hinic_del_tcam_filter(dev, tcam_filter);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
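/*
 * Illustrative caller-side sketch (an assumption about typical usage,
 * not part of the driver): a queue action with an IPv4 pattern reaches
 * one of the three filter paths below, depending on which parser
 * accepts it. ipv4_spec/ipv4_mask stand for locals the caller fills in.
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ipv4_spec, .mask = &ipv4_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */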
static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error)
{
        int ret;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct hinic_fdir_rule fdir_rule;
        struct rte_flow *flow = NULL;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
                return NULL;
        }

        hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
                        sizeof(struct hinic_flow_mem), 0);
        if (!hinic_flow_mem_ptr) {
                PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
                rte_free(flow);
                return NULL;
        }

        hinic_flow_mem_ptr->flow = flow;
        TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
                                entries);

        /* Add ntuple filter */
        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = hinic_parse_ntuple_filter(dev, attr, pattern,
                        actions, &ntuple_filter, error);
        if (!ret) {
                ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
                if (!ret) {
                        ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
                                sizeof(struct hinic_ntuple_filter_ele), 0);
                        if (ntuple_filter_ptr == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to allocate ntuple filter memory");
                                /* Roll back the hardware filter just added */
                                (void)hinic_add_del_ntuple_filter(dev,
                                                &ntuple_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ntuple_filter_ptr->filter_info,
                                   &ntuple_filter,
                                   sizeof(struct rte_eth_ntuple_filter));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
                                          ntuple_filter_ptr, entries);
                        flow->rule = ntuple_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_NTUPLE;

                        PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

        /* Add ethertype filter */
        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
                                        &ethertype_filter, error);
        if (!ret) {
                ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
                                                     TRUE);
                if (!ret) {
                        ethertype_filter_ptr =
                                rte_zmalloc("hinic_ethertype_filter",
                                sizeof(struct hinic_ethertype_filter_ele), 0);
                        if (ethertype_filter_ptr == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to allocate ethertype filter memory");
                                /* Roll back the hardware filter just added */
                                (void)hinic_add_del_ethertype_filter(dev,
                                                &ethertype_filter, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ethertype_filter_ptr->filter_info,
                                &ethertype_filter,
                                sizeof(struct rte_eth_ethertype_filter));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        flow->rule = ethertype_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;

                        PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

        /* Add fdir filter */
        memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
        ret = hinic_parse_fdir_filter(dev, attr, pattern,
                                      actions, &fdir_rule, error);
        if (!ret) {
                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
                        ret = hinic_add_del_fdir_filter(dev,
                                        &fdir_rule, TRUE);
                } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
                        ret = hinic_add_del_tcam_fdir_filter(dev,
                                        &fdir_rule, TRUE);
                } else {
                        PMD_DRV_LOG(ERR, "Create flow fdir rule failed: wrong rule mode");
                        ret = -EINVAL;
                        goto out;
                }
                if (!ret) {
                        fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
                                sizeof(struct hinic_fdir_rule_ele), 0);
                        if (fdir_rule_ptr == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to allocate fdir rule memory");
                                /* Roll back the hardware rule just added */
                                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL)
                                        (void)hinic_add_del_fdir_filter(dev,
                                                        &fdir_rule, FALSE);
                                else
                                        (void)hinic_add_del_tcam_fdir_filter(dev,
                                                        &fdir_rule, FALSE);
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
                                sizeof(struct hinic_fdir_rule));
                        TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
                                fdir_rule_ptr, entries);
                        flow->rule = fdir_rule_ptr;
                        flow->filter_type = RTE_ETH_FILTER_FDIR;

                        PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id: 0x%x",
                                        hinic_global_func_id(nic_dev->hwdev));
                        return flow;
                }
                goto out;
        }

out:
        TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(hinic_flow_mem_ptr);
        rte_free(flow);
        return NULL;
}

/* Destroy a flow rule on hinic. */
static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                                struct rte_flow_error *error)
{
        int ret;
        struct rte_flow *pmd_flow = flow;
        enum rte_filter_type filter_type = pmd_flow->filter_type;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct hinic_fdir_rule fdir_rule;
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
                ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
                                        pmd_flow->rule;
                rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
                        sizeof(struct rte_eth_ntuple_filter));
                ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
                                ntuple_filter_ptr, entries);
                        rte_free(ntuple_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_ETHERTYPE:
                ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
                                        pmd_flow->rule;
                rte_memcpy(&ethertype_filter,
                        &ethertype_filter_ptr->filter_info,
                        sizeof(struct rte_eth_ethertype_filter));
                ret = hinic_add_del_ethertype_filter(dev,
                                &ethertype_filter, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        rte_free(ethertype_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_FDIR:
                fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
                rte_memcpy(&fdir_rule,
                        &fdir_rule_ptr->filter_info,
                        sizeof(struct hinic_fdir_rule));
                if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
                        ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
                } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
                        ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
                                                                FALSE);
                } else {
                        PMD_DRV_LOG(ERR, "Wrong fdir filter mode!");
                        ret = -EINVAL;
                }
                if (!ret) {
                        TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
                                fdir_rule_ptr, entries);
                        rte_free(fdir_rule_ptr);
                }
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
                        filter_type);
                ret = -EINVAL;
                break;
        }

        if (ret) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failed to destroy flow");
                return ret;
        }

        TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
                if (hinic_flow_mem_ptr->flow == pmd_flow) {
                        TAILQ_REMOVE(&nic_dev->hinic_flow_list,
                                hinic_flow_mem_ptr, entries);
                        rte_free(hinic_flow_mem_ptr);
                        break;
                }
        }
        rte_free(flow);

        PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
                        hinic_global_func_id(nic_dev->hwdev));

        return ret;
}

/* Remove all the n-tuple filters */
static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
        struct hinic_filter_info *filter_info =
                HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct hinic_5tuple_filter *p_5tuple;

        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
                hinic_remove_5tuple_filter(dev, p_5tuple);
}

/* Remove all the ether type filters */
static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct hinic_filter_info *filter_info =
                HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
        int ret = 0;

        if (filter_info->type_mask &
                (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
                hinic_ethertype_filter_remove(filter_info,
                        HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
                ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
                                        filter_info->qid, false, true);

                (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
        }

        if (filter_info->type_mask &
                (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
                hinic_ethertype_filter_remove(filter_info,
                        HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
                ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
                        filter_info->qid, false, true);
        }

        if (ret)
                PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
                                filter_info->pkt_type);
}

/* Remove all the fdir filters */
static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct hinic_tcam_info *tcam_info =
                HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
        struct hinic_tcam_filter *tcam_filter_ptr;

        while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
                (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);

        (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);

        (void)hinic_flush_tcam_rule(nic_dev->hwdev);
}

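/*
 * Free the software bookkeeping lists only; the hinic_clear_all_*()
 * helpers above must run first so no hardware rule is left behind.
 */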
static void hinic_filterlist_flush(struct rte_eth_dev *dev)
{
        struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
        struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
        struct hinic_fdir_rule_ele *fdir_rule_ptr;
        struct hinic_flow_mem *hinic_flow_mem_ptr;
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        while ((ntuple_filter_ptr =
                        TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
                TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
                                entries);
                rte_free(ntuple_filter_ptr);
        }

        while ((ethertype_filter_ptr =
                        TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
                TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                rte_free(ethertype_filter_ptr);
        }

        while ((fdir_rule_ptr =
                        TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
                TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
                                entries);
                rte_free(fdir_rule_ptr);
        }

        while ((hinic_flow_mem_ptr =
                        TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
                TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
                                entries);
                rte_free(hinic_flow_mem_ptr->flow);
                rte_free(hinic_flow_mem_ptr);
        }
}

/* Destroy all flow rules associated with a port on hinic. */
static int hinic_flow_flush(struct rte_eth_dev *dev,
                                __rte_unused struct rte_flow_error *error)
{
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

        hinic_clear_all_ntuple_filter(dev);
        hinic_clear_all_ethertype_filter(dev);
        hinic_clear_all_fdir_filter(dev);
        hinic_filterlist_flush(dev);

        PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
                        hinic_global_func_id(nic_dev->hwdev));
        return 0;
}

void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
{
        hinic_clear_all_ntuple_filter(dev);
        hinic_clear_all_ethertype_filter(dev);
        hinic_clear_all_fdir_filter(dev);
        hinic_filterlist_flush(dev);
}

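/*
 * Generic rte_flow entry points for this PMD. Applications reach them
 * through rte_flow_validate()/rte_flow_create()/rte_flow_destroy()/
 * rte_flow_flush(); the ethdev layer obtains this table from the
 * driver's filter-control callback (an assumption about how this PMD
 * generation registers its flow ops).
 */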
const struct rte_flow_ops hinic_flow_ops = {
        .validate = hinic_flow_validate,
        .create = hinic_flow_create,
        .destroy = hinic_flow_destroy,
        .flush = hinic_flow_flush,
};