net/hinic: create and destroy flow director filter
drivers/net/hinic/hinic_pmd_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM          64

#ifndef UINT8_MAX
#define UINT8_MAX          (u8)(~((u8)0))       /* 0xFF               */
#define UINT16_MAX         (u16)(~((u16)0))     /* 0xFFFF             */
#define UINT32_MAX         (u32)(~((u32)0))     /* 0xFFFFFFFF         */
#define UINT64_MAX         (u64)(~((u64)0))     /* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX          (0x7F)
#endif

/* IPSURX MACRO */
#define PA_ETH_TYPE_ROCE                0
#define PA_ETH_TYPE_IPV4                1
#define PA_ETH_TYPE_IPV6                2
#define PA_ETH_TYPE_OTHER               3

#define PA_IP_PROTOCOL_TYPE_TCP         1
#define PA_IP_PROTOCOL_TYPE_UDP         2
#define PA_IP_PROTOCOL_TYPE_ICMP        3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP   4
#define PA_IP_PROTOCOL_TYPE_SCTP        5
#define PA_IP_PROTOCOL_TYPE_VRRP        112

#define IP_HEADER_PROTOCOL_TYPE_TCP     6

#define HINIC_MIN_N_TUPLE_PRIO          1
#define HINIC_MAX_N_TUPLE_PRIO          7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT      1
#define TCAM_PKT_VRRP           2
#define TCAM_PKT_BGP_DPORT      3
#define TCAM_PKT_LACP           4

#define BGP_DPORT_ID            179
#define IPPROTO_VRRP            112

/* Packet types defined in hardware to perform filtering */
#define PKT_IGMP_IPV4_TYPE     64
#define PKT_ICMP_IPV4_TYPE     65
#define PKT_ICMP_IPV6_TYPE     66
#define PKT_ICMP_IPV6RS_TYPE   67
#define PKT_ICMP_IPV6RA_TYPE   68
#define PKT_ICMP_IPV6NS_TYPE   69
#define PKT_ICMP_IPV6NA_TYPE   70
#define PKT_ICMP_IPV6RE_TYPE   71
#define PKT_DHCP_IPV4_TYPE     72
#define PKT_DHCP_IPV6_TYPE     73
#define PKT_LACP_TYPE          74
#define PKT_ARP_REQ_TYPE       79
#define PKT_ARP_REP_TYPE       80
#define PKT_ARP_TYPE           81
#define PKT_BGPD_DPORT_TYPE    83
#define PKT_BGPD_SPORT_TYPE    84
#define PKT_VRRP_TYPE          85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
        (&((struct hinic_nic_dev *)nic_dev)->filter)

enum hinic_atr_flow_type {
        HINIC_ATR_FLOW_TYPE_IPV4_DIP    = 0x1,
        HINIC_ATR_FLOW_TYPE_IPV4_SIP    = 0x2,
        HINIC_ATR_FLOW_TYPE_DPORT       = 0x3,
        HINIC_ATR_FLOW_TYPE_SPORT       = 0x4,
};

/* Structure to store flow director (fdir) info. */
struct hinic_fdir_info {
        uint8_t fdir_flag;
        uint8_t qid;
        uint32_t fdir_key;
};
/**
 * An endless loop can never happen with the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}
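
/*
 * E.g. with a pattern of { VOID, ETH, VOID, END },
 * next_no_void_pattern(pattern, NULL) returns &pattern[1] (ETH) and
 * next_no_void_pattern(pattern, &pattern[1]) returns &pattern[3] (END);
 * the END item always terminates the walk, so the loop is bounded.
 */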

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}

static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Egress is not supported.");
                return -rte_errno;
        }

        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Priority is not supported.");
                return -rte_errno;
        }

        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
                                const struct rte_flow_item *pattern,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
                                        struct rte_flow_error *error)
{
        /* The first non-void item should be MAC */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }
        return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
                        const struct rte_flow_action *act,
                        const struct rte_flow_action_queue *act_q,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        /* Parse action */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
                act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule, and get the
 * ethertype filter info if it is.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item *pattern,
                        const struct rte_flow_action *actions,
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act = NULL;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        item = next_no_void_pattern(pattern, NULL);
        if (hinic_check_ethertype_first_item(item, error))
                return -rte_errno;

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /*
         * Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /*
         * If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
                return -rte_errno;

        if (hinic_check_ethertype_attr_ele(attr, error))
                return -rte_errno;

        return 0;
}

static int
hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ethertype_filter *filter,
                        struct rte_flow_error *error)
{
        if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
                return -rte_errno;

        /* NIC doesn't support MAC address matching */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Drop option is unsupported");
                return -rte_errno;
        }

        /* Hinic only supports LACP/ARP ether types */
        if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
                filter->ether_type != RTE_ETHER_TYPE_ARP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "only lacp/arp type supported by ethertype filter");
                return -rte_errno;
        }

        return 0;
}
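
/*
 * Usage sketch (illustrative addition, not part of the original driver):
 * an ethertype rule in the shape the parser above accepts -- steer ARP
 * frames to an RX queue, MAC addresses fully masked off. Port id 0, queue
 * index 1 and the HINIC_FLOW_USAGE_SKETCH guard are assumptions of the
 * example.
 */
#ifdef HINIC_FLOW_USAGE_SKETCH
static struct rte_flow *
example_arp_to_queue(struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Match on ethertype only; src/dst MAC masks stay all-zero */
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP),
        };
        struct rte_flow_item_eth eth_mask = {
                .type = rte_cpu_to_be_16(UINT16_MAX),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, error);
}
#endif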

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -rte_errno;
        }

        /* Out-of-range priorities fall back to the default of 1 */
        if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
                    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
                filter->priority = 1;
        else
                filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        /*
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Flow action type is not QUEUE.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Next not void item is not END.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* If the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        *ipv4_item = item;
        return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
                        const struct rte_flow_item pattern[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /*
         * Only support src & dst addresses and protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum ||
                !ipv4_mask->hdr.next_proto_id) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct rte_eth_ntuple_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item *item = *in_out_item;
        u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

        if (item->type == RTE_FLOW_ITEM_TYPE_END)
                return 0;

        /* Get TCP or UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, ntuple_filter_size);
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /*
                 * Only support src & dst ports and tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                        tcp_mask->hdr.recv_ack ||
                        tcp_mask->hdr.data_off ||
                        tcp_mask->hdr.rx_win ||
                        tcp_mask->hdr.cksum ||
                        tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                /* TCP flags must be fully masked or fully unmasked */
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
                icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

                /* All ICMP fields should be masked. */
                if (icmp_mask->hdr.icmp_cksum ||
                        icmp_mask->hdr.icmp_ident ||
                        icmp_mask->hdr.icmp_seq_nb ||
                        icmp_mask->hdr.icmp_type ||
                        icmp_mask->hdr.icmp_code) {
                        memset(filter, 0, ntuple_filter_size);
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* Get next no void item */
        *in_out_item = next_no_void_pattern(pattern, item);
        return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error)
{
        if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
                hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
                hinic_ntuple_item_check_end(item, filter, error))
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an n-tuple rule, and get the n-tuple
 * filter info if it is.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP      src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally be in network order.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_act_ele(item, actions, filter, error))
                return -rte_errno;

        if (hinic_check_ntuple_attr_ele(attr, filter, error))
                return -rte_errno;

        return 0;
}

static int
hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Hinic doesn't support TCP flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Hinic only supports priorities 1-7 */
        if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
            filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index too big");
                return -rte_errno;
        }

        /* Fixed value for hinic */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}
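
/*
 * Usage sketch (illustrative addition, not part of the original driver):
 * an n-tuple rule in the shape the parsers above accept -- fully masked
 * IPv4 addresses plus protocol, fully masked TCP ports, TCP flags left
 * unmasked. Port id 0, queue index 1, the addresses/ports and the
 * HINIC_FLOW_USAGE_SKETCH guard are assumptions of the example.
 */
#ifdef HINIC_FLOW_USAGE_SKETCH
static struct rte_flow *
example_ntuple_tcp_to_queue(struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xc0a80114), /* 192.168.1.20 */
                        .dst_addr = rte_cpu_to_be_32(0xc0a70332), /* 192.167.3.50 */
                        .next_proto_id = IP_HEADER_PROTOCOL_TYPE_TCP,
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = UINT32_MAX,
                        .dst_addr = UINT32_MAX,
                        .next_proto_id = UINT8_MAX,
                },
        };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr = {
                        .src_port = UINT16_MAX,
                        .dst_port = UINT16_MAX,
                },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask must be NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, error);
}
#endif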

static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
                                        const struct rte_flow_item pattern[],
                                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;

        /* The first not void item can be MAC or IPv4 or TCP or UDP */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
                item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Not supported by fdir filter, support mac, ipv4, tcp, udp");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
                        "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* All should be masked. */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support mac");
                        return -rte_errno;
                }
                /* Check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support mac, ipv4");
                        return -rte_errno;
                }
        }

        *ip_item = item;
        return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item *item = *in_out_item;

        /* Get the IPv4 info */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                if (!item->mask) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid fdir filter mask");
                        return -rte_errno;
                }

                ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
                /*
                 * Only support src & dst addresses,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                        ipv4_mask->hdr.type_of_service ||
                        ipv4_mask->hdr.total_length ||
                        ipv4_mask->hdr.packet_id ||
                        ipv4_mask->hdr.fragment_offset ||
                        ipv4_mask->hdr.time_to_live ||
                        ipv4_mask->hdr.next_proto_id ||
                        ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support src, dst ip");
                        return -rte_errno;
                }

                rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
                rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

                if (item->spec) {
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
                        rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
                }

                /*
                 * Check if the next not void item is
                 * TCP or UDP or END.
                 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
                    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
                    item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
                                "Not supported by fdir filter, support tcp, udp, end");
                        return -rte_errno;
                }
        }

        *in_out_item = item;
        return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
                                const struct rte_flow_item pattern[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item *item = *in_out_item;

        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                /* Get TCP/UDP info */
                if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                                        "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.tcp_flags ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support tcp");
                                return -rte_errno;
                        }

                        rule->mask.src_port_mask = tcp_mask->hdr.src_port;
                        rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

                        if (item->spec) {
                                tcp_spec =
                                        (const struct rte_flow_item_tcp *)
                                        item->spec;
                                rule->hinic_fdir.src_port =
                                        tcp_spec->hdr.src_port;
                                rule->hinic_fdir.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }

                } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                        /*
                         * Only care about src & dst ports,
                         * others should be masked.
                         */
                        if (!item->mask) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support src, dst ports");
                                return -rte_errno;
                        }

                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by fdir filter, support udp");
                                return -rte_errno;
                        }
                        rule->mask.src_port_mask = udp_mask->hdr.src_port;
                        rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

                        if (item->spec) {
                                udp_spec =
                                        (const struct rte_flow_item_udp *)
                                        item->spec;
                                rule->hinic_fdir.src_port =
                                        udp_spec->hdr.src_port;
                                rule->hinic_fdir.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                } else {
                        memset(rule, 0, sizeof(struct hinic_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter, support tcp/udp");
                        return -rte_errno;
                }

                /* Get next no void item */
                *in_out_item = next_no_void_pattern(pattern, item);
        }

        return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Check if the next not void item is END */
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter, support end");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
                                        const struct rte_flow_item pattern[],
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        if (hinic_normal_item_check_ether(&item, pattern, error) ||
                hinic_normal_item_check_ip(&item, pattern, rule, error) ||
                hinic_normal_item_check_l4(&item, pattern, rule, error) ||
                hinic_normal_item_check_end(item, rule, error))
                return -rte_errno;

        return 0;
}

static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
                                        struct hinic_fdir_rule *rule,
                                        struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Priority is not supported.");
                return -rte_errno;
        }

        return 0;
}

static int hinic_check_normal_act_ele(__rte_unused const struct rte_flow_item *item,
                                const struct rte_flow_action actions[],
                                struct hinic_fdir_rule *rule,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action *act;

        /* Check if the first not void action is QUEUE */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;

        /* Check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(rule, 0, sizeof(struct hinic_fdir_rule));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
 * and get the flow director filter info if it is.
 * UDP/TCP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The next not void item can be UDP or TCP (optional).
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 1.2.3.6        0xFFFFFFFF
 *              dst_addr 1.2.3.5        0xFFFFFFFF
 * UDP/TCP      src_port 80             0xFFFF
 *              dst_port 80             0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
                               const struct rte_flow_item pattern[],
                               const struct rte_flow_action actions[],
                               struct hinic_fdir_rule *rule,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *item = NULL;

        if (hinic_check_filter_arg(attr, pattern, actions, error))
                return -rte_errno;

        if (hinic_check_normal_item_ele(item, pattern, rule, error))
                return -rte_errno;

        if (hinic_check_normal_attr_ele(attr, rule, error))
                return -rte_errno;

        if (hinic_check_normal_act_ele(item, actions, rule, error))
                return -rte_errno;

        return 0;
}

static int
hinic_parse_fdir_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct hinic_fdir_rule *rule,
                        struct rte_flow_error *error)
{
        int ret;

        ret = hinic_parse_fdir_filter_normal(attr, pattern,
                                                actions, rule, error);
        if (ret)
                return ret;

        if (rule->queue >= dev->data->nb_rx_queues) {
                rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        NULL, "Queue index too big");
                return -rte_errno;
        }

        return ret;
}
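
/*
 * Usage sketch (illustrative addition, not part of the original driver):
 * a flow director rule in the shape the parser above accepts -- fully
 * masked IPv4 addresses and UDP ports with no protocol mask, so the rule
 * is rejected by the ntuple parser and falls through to fdir. Port id 0,
 * queue index 2 and the HINIC_FLOW_USAGE_SKETCH guard are assumptions of
 * the example.
 */
#ifdef HINIC_FLOW_USAGE_SKETCH
static struct rte_flow *
example_fdir_udp_to_queue(struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0x01020306), /* 1.2.3.6 */
                        .dst_addr = rte_cpu_to_be_32(0x01020305), /* 1.2.3.5 */
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = UINT32_MAX,
                        .dst_addr = UINT32_MAX,
                },
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = UINT16_MAX,
                        .dst_port = UINT16_MAX,
                },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask must be NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, error);
}
#endif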

/**
 * Check if the flow rule is supported by the nic.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 * The parsers are tried in order (ntuple, ethertype, fdir) and the rule
 * is classified as the first type whose parser accepts it.
 */
static int hinic_flow_validate(struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_flow_error *error)
{
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct hinic_fdir_rule fdir_rule;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = hinic_parse_ntuple_filter(dev, attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = hinic_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret)
                return 0;

        memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
        ret = hinic_parse_fdir_filter(dev, attr, pattern,
                                actions, &fdir_rule, error);

        return ret;
}

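/*
 * The conversions below map rte_flow-style masks onto the hardware
 * encoding: a fully masked field (UINT*_MAX) gets *_mask = 0 with the
 * value copied through, an unmasked field (0) gets *_mask = 1 with the
 * value zeroed, and any partial mask is rejected.
 */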
static inline int
ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
                 struct hinic_5tuple_filter_info *filter_info)
{
        switch (filter->dst_ip_mask) {
        case UINT32_MAX:
                filter_info->dst_ip_mask = 0;
                filter_info->dst_ip = filter->dst_ip;
                break;
        case 0:
                filter_info->dst_ip_mask = 1;
                filter_info->dst_ip = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
                return -EINVAL;
        }

        switch (filter->src_ip_mask) {
        case UINT32_MAX:
                filter_info->src_ip_mask = 0;
                filter_info->src_ip = filter->src_ip;
                break;
        case 0:
                filter_info->src_ip_mask = 1;
                filter_info->src_ip = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
                return -EINVAL;
        }
        return 0;
}

static inline int
ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
                   struct hinic_5tuple_filter_info *filter_info)
{
        switch (filter->dst_port_mask) {
        case UINT16_MAX:
                filter_info->dst_port_mask = 0;
                filter_info->dst_port = filter->dst_port;
                break;
        case 0:
                filter_info->dst_port_mask = 1;
                filter_info->dst_port = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
                return -EINVAL;
        }

        switch (filter->src_port_mask) {
        case UINT16_MAX:
                filter_info->src_port_mask = 0;
                filter_info->src_port = filter->src_port;
                break;
        case 0:
                filter_info->src_port_mask = 1;
                filter_info->src_port = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid src_port mask.");
                return -EINVAL;
        }

        return 0;
}

static inline int
ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
                    struct hinic_5tuple_filter_info *filter_info)
{
        switch (filter->proto_mask) {
        case UINT8_MAX:
                filter_info->proto_mask = 0;
                filter_info->proto = filter->proto;
                break;
        case 0:
                filter_info->proto_mask = 1;
                filter_info->proto = 0;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid protocol mask.");
                return -EINVAL;
        }

        return 0;
}
1321
1322 static inline int
1323 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1324                         struct hinic_5tuple_filter_info *filter_info)
1325 {
1326         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1327                 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1328                 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1329                 return -EINVAL;
1330
1331         if (ntuple_ip_filter(filter, filter_info) ||
1332                 ntuple_port_filter(filter, filter_info) ||
1333                 ntuple_proto_filter(filter, filter_info))
1334                 return -EINVAL;
1335
1336         filter_info->priority = (uint8_t)filter->priority;
1337         return 0;
1338 }
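/*
 * Note the inverted mask conventions at play above: in rte_eth_ntuple_filter
 * a mask of UINT32_MAX/UINT16_MAX/UINT8_MAX means "compare this field" and 0
 * means "ignore it", while in hinic_5tuple_filter_info mask == 0 means
 * "compare" and mask == 1 means "wildcard". A minimal sketch (field values
 * chosen purely for illustration):
 *
 *     struct rte_eth_ntuple_filter f;
 *     struct hinic_5tuple_filter_info info;
 *
 *     memset(&f, 0, sizeof(f));
 *     f.flags = RTE_5TUPLE_FLAGS;
 *     f.proto = IPPROTO_TCP;
 *     f.proto_mask = UINT8_MAX;             // compare the protocol
 *     f.dst_port = RTE_BE16(BGP_DPORT_ID);
 *     f.dst_port_mask = UINT16_MAX;         // compare the dst port
 *     f.priority = HINIC_MIN_N_TUPLE_PRIO;
 *
 *     memset(&info, 0, sizeof(info));
 *     if (ntuple_filter_to_5tuple(&f, &info) == 0) {
 *             // info.proto_mask == 0 and info.dst_port_mask == 0 here,
 *             // i.e. both fields take part in the hardware match.
 *     }
 */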
1339
1340 static inline struct hinic_5tuple_filter *
1341 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1342                            struct hinic_5tuple_filter_info *key)
1343 {
1344         struct hinic_5tuple_filter *it;
1345
1346         TAILQ_FOREACH(it, filter_list, entries) {
1347                 if (memcmp(key, &it->filter_info,
1348                         sizeof(struct hinic_5tuple_filter_info)) == 0) {
1349                         return it;
1350                 }
1351         }
1352
1353         return NULL;
1354 }
1355
1356 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1357 {
1358         struct tag_pa_rule lacp_rule;
1359         struct tag_pa_action lacp_action;
1360
1361         memset(&lacp_rule, 0, sizeof(lacp_rule));
1362         memset(&lacp_action, 0, sizeof(lacp_action));
1363         /* LACP TCAM rule */
1364         lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1365         lacp_rule.l2_header.eth_type.val16 = 0x8809;
1366         lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1367
1368         /* LACP TCAM action */
1369         lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1370         lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1371         lacp_action.pkt_type = PKT_LACP_TYPE;
1372         lacp_action.pri = 0x0;
1373         lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1374
1375         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1376                                         &lacp_rule, &lacp_action);
1377 }
1378
1379 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1380 {
1381         struct tag_pa_rule bgp_rule;
1382         struct tag_pa_action bgp_action;
1383
1384         memset(&bgp_rule, 0, sizeof(bgp_rule));
1385         memset(&bgp_action, 0, sizeof(bgp_action));
1386         /* BGP TCAM rule */
1387         bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1388         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1389         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1390         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1391         bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1392         bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1393
1394         /* BGP TCAM action */
1395         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1396         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1397         bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1398         bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the
1399                                * ipsu parse result, no need to convert
1400                                */
1401         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1402
1403         return hinic_set_fdir_tcam(nic_dev->hwdev,
1404                         TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1405 }
1406
1407 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1408 {
1409         struct tag_pa_rule bgp_rule;
1410         struct tag_pa_action bgp_action;
1411
1412         memset(&bgp_rule, 0, sizeof(bgp_rule));
1413         memset(&bgp_action, 0, sizeof(bgp_action));
1414         /* BGP TCAM rule */
1415         bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1416         bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1417         bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1418         bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1419         bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID; /* Sport is also 179 */
1420         bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1421
1422         /* BGP TCAM action */
1423         bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1424         bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1425         bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1426         bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the
1427                                * ipsu parse result, no need to convert
1428                                */
1429         bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1430
1431         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1432                                         &bgp_rule, &bgp_action);
1433 }
1434
1435 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1436 {
1437         struct tag_pa_rule vrrp_rule;
1438         struct tag_pa_action vrrp_action;
1439
1440         memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1441         memset(&vrrp_action, 0, sizeof(vrrp_action));
1442         /* VRRP TCAM rule */
1443         vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1444         vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1445         vrrp_rule.ip_header.protocol.mask8 = 0xff;
1446         vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1447
1448         /* VRRP TCAM action */
1449         vrrp_action.err_type = 0x3f;
1450         vrrp_action.fwd_action = 0x7;
1451         vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1452         vrrp_action.pri = 0xf;
1453         vrrp_action.push_len = 0xf;
1454
1455         return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1456                                         &vrrp_rule, &vrrp_action);
1457 }
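/*
 * The four TCAM helpers above share one skeleton: fill a tag_pa_rule with
 * the protocol match, fill a tag_pa_action with the classification result
 * (err_type 0x3f, fwd_action 0x7 and push_len 0xf consistently mean "do
 * not convert"; only pkt_type and pri differ), then program one TCAM
 * entry. A hedged sketch of that shared shape (the parameter types are
 * assumptions, not taken from the hinic base headers):
 *
 *     static int hinic_set_pa_tcam(struct hinic_nic_dev *nic_dev,
 *                                  u16 tcam_type, u8 pkt_type, u8 pri,
 *                                  void (*fill_rule)(struct tag_pa_rule *))
 *     {
 *             struct tag_pa_rule rule;
 *             struct tag_pa_action action;
 *
 *             memset(&rule, 0, sizeof(rule));
 *             memset(&action, 0, sizeof(action));
 *             fill_rule(&rule);           // protocol-specific match fields
 *
 *             action.err_type = 0x3f;     // err from ipsu, not convert
 *             action.fwd_action = 0x7;    // 0x3: drop; 0x7: not convert
 *             action.pkt_type = pkt_type;
 *             action.pri = pri;
 *             action.push_len = 0xf;      // not convert
 *
 *             return hinic_set_fdir_tcam(nic_dev->hwdev, tcam_type,
 *                                        &rule, &action);
 *     }
 */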
1458
1459 static int
1460 hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1461                        struct hinic_filter_info *filter_info)
1462 {
1463         switch (filter->filter_info.proto) {
1464         case IPPROTO_TCP:
1465                 /* Filter type is bgp type if dst_port or src_port is 179 */
1466                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1467                         !(filter->filter_info.dst_port_mask)) {
1468                         filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1469                 } else if (filter->filter_info.src_port ==
1470                         RTE_BE16(BGP_DPORT_ID) &&
1471                         !(filter->filter_info.src_port_mask)) {
1472                         filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1473                 } else {
1474                         PMD_DRV_LOG(INFO, "TCP protocol: 5-tuple filters"
1475                         " only support BGP now, proto: 0x%x, "
1476                         "dst_port: 0x%x, dst_port_mask: 0x%x, "
1477                         "src_port: 0x%x, src_port_mask: 0x%x.",
1478                         filter->filter_info.proto,
1479                         filter->filter_info.dst_port,
1480                         filter->filter_info.dst_port_mask,
1481                         filter->filter_info.src_port,
1482                         filter->filter_info.src_port_mask);
1483                         return -EINVAL;
1484                 }
1485                 break;
1486
1487         case IPPROTO_VRRP:
1488                 filter_info->pkt_type = PKT_VRRP_TYPE;
1489                 break;
1490
1491         case IPPROTO_ICMP:
1492                 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1493                 break;
1494
1495         case IPPROTO_ICMPV6:
1496                 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1497                 break;
1498
1499         default:
1500                 PMD_DRV_LOG(ERR, "5-tuple filters only support BGP/VRRP/ICMP now, "
1501                 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x, "
1502                 "src_port: 0x%x, src_port_mask: 0x%x.",
1503                 filter->filter_info.proto, filter->filter_info.dst_port,
1504                 filter->filter_info.dst_port_mask,
1505                 filter->filter_info.src_port,
1506                 filter->filter_info.src_port_mask);
1507                 return -EINVAL;
1508         }
1509
1510         return 0;
1511 }
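/*
 * Mapping summary for hinic_filter_info_init() (IPPROTO_TCP/ICMP/ICMPV6
 * come from the system headers; IPPROTO_VRRP is defined locally as 112):
 *
 *     IPPROTO_TCP, dst_port 179, port compared  ->  PKT_BGPD_DPORT_TYPE
 *     IPPROTO_TCP, src_port 179, port compared  ->  PKT_BGPD_SPORT_TYPE
 *     IPPROTO_VRRP                              ->  PKT_VRRP_TYPE
 *     IPPROTO_ICMP                              ->  PKT_ICMP_IPV4_TYPE
 *     IPPROTO_ICMPV6                            ->  PKT_ICMP_IPV6_TYPE
 *     anything else                             ->  -EINVAL
 */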
1512
1513 static int
1514 hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1515                         struct hinic_filter_info *filter_info,
1516                         int *index)
1517 {
1518         int type_id;
1519
1520         type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1521
1522         if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1523                 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter types.");
1524                 return -EINVAL;
1525         }
1526
1527         if (!(filter_info->type_mask & (1 << type_id))) {
1528                 filter_info->type_mask |= 1 << type_id;
1529                 filter->index = type_id;
1530                 filter_info->pkt_filters[type_id].enable = true;
1531                 filter_info->pkt_filters[type_id].pkt_proto =
1532                                                 filter->filter_info.proto;
1533                 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1534                                   filter, entries);
1535         } else {
1536                 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1537                 return -EIO;
1538         }
1539
1540         *index = type_id;
1541         return 0;
1542 }
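/*
 * filter_info->type_mask works as a one-bit-per-packet-type allocation map:
 * HINIC_PKT_TYPE_FIND_ID() turns a PKT_*_TYPE value into a slot index and a
 * set bit means that slot is already claimed, so at most one filter per
 * hardware packet type can exist. A minimal sketch of the same bookkeeping:
 *
 *     int id = HINIC_PKT_TYPE_FIND_ID(PKT_VRRP_TYPE);
 *
 *     if (filter_info->type_mask & (1 << id)) {
 *             // a VRRP filter is already installed; a second add fails
 *     } else {
 *             filter_info->type_mask |= 1 << id;      // claim the slot
 *     }
 */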
1543
1544 /*
1545  * Add a 5tuple filter
1546  *
1547  * @param dev
1548  *  Pointer to struct rte_eth_dev.
1549  * @param filter
1550  *  Pointer to the filter that will be added.
1551  * @return
1552  *    - On success, zero.
1553  *    - On failure, a negative value.
1554  */
1555 static int
1556 hinic_add_5tuple_filter(struct rte_eth_dev *dev,
1557                         struct hinic_5tuple_filter *filter)
1558 {
1559         struct hinic_filter_info *filter_info =
1560                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1561         int i, ret_fw;
1562         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1563
1564         if (hinic_filter_info_init(filter, filter_info) ||
1565                 hinic_lookup_new_filter(filter, filter_info, &i))
1566                 return -EFAULT;
1567
1568         ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1569                                         filter_info->qid,
1570                                         filter_info->pkt_filters[i].enable,
1571                                         true);
1572         if (ret_fw) {
1573                 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1574                         filter_info->pkt_type, filter->queue,
1575                         filter_info->pkt_filters[i].enable);
1576                 return -EFAULT;
1577         }
1578
1579         PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1580                         filter_info->pkt_type, filter_info->qid,
1581                         filter_info->pkt_filters[filter->index].enable);
1582
1583         switch (filter->filter_info.proto) {
1584         case IPPROTO_TCP:
1585                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
1586                         ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
1587                         if (ret_fw) {
1588                                 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
1589                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1590                                         filter_info->pkt_type, filter->queue,
1591                                         filter_info->pkt_filters[i].enable);
1592                                 return -EFAULT;
1593                         }
1594
1595                         PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
1596                                 filter->queue,
1597                                 filter_info->pkt_filters[i].enable);
1598                 } else if (filter->filter_info.src_port ==
1599                         RTE_BE16(BGP_DPORT_ID)) {
1600                         ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
1601                         if (ret_fw) {
1602                                 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
1603                                         "type: 0x%x, qid: 0x%x, enable: 0x%x",
1604                                         filter_info->pkt_type, filter->queue,
1605                                         filter_info->pkt_filters[i].enable);
1606                                 return -EFAULT;
1607                         }
1608
1609                         PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
1610                                         filter->queue,
1611                                         filter_info->pkt_filters[i].enable);
1612                 }
1613
1614                 break;
1615
1616         case IPPROTO_VRRP:
1617                 ret_fw = hinic_set_vrrp_tcam(nic_dev);
1618                 if (ret_fw) {
1619                         PMD_DRV_LOG(ERR, "Set VRRP failed, "
1620                                 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1621                                 filter_info->pkt_type, filter->queue,
1622                                 filter_info->pkt_filters[i].enable);
1623                         return -EFAULT;
1624                 }
1625                 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
1626                                 filter->queue,
1627                                 filter_info->pkt_filters[i].enable);
1628                 break;
1629
1630         default:
1631                 break;
1632         }
1633
1634         return 0;
1635 }
1636
1637 /*
1638  * Remove a 5tuple filter
1639  *
1640  * @param dev
1641  *  Pointer to struct rte_eth_dev.
1642  * @param filter
1643  *  Pointer to the filter to be removed.
1644  */
1645 static void
1646 hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
1647                            struct hinic_5tuple_filter *filter)
1648 {
1649         struct hinic_filter_info *filter_info =
1650                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1651         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1652
1653         switch (filter->filter_info.proto) {
1654         case IPPROTO_VRRP:
1655                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1656                 break;
1657
1658         case IPPROTO_TCP:
1659                 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
1660                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
1661                                                         TCAM_PKT_BGP_DPORT);
1662                 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
1663                         (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
1664                                                         TCAM_PKT_BGP_SPORT);
1665                 break;
1666
1667         default:
1668                 break;
1669         }
1670
1671         hinic_filter_info_init(filter, filter_info);
1672
1673         filter_info->pkt_filters[filter->index].enable = false;
1674         filter_info->pkt_filters[filter->index].pkt_proto = 0;
1675
1676         PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1677                 filter_info->pkt_type,
1678                 filter_info->pkt_filters[filter->index].qid,
1679                 filter_info->pkt_filters[filter->index].enable);
1680         (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1681                                 filter_info->pkt_filters[filter->index].qid,
1682                                 filter_info->pkt_filters[filter->index].enable,
1683                                 true);
1684
1685         filter_info->pkt_type = 0;
1686         filter_info->qid = 0;
1687         filter_info->pkt_filters[filter->index].qid = 0;
1688         filter_info->type_mask &= ~(1 << (filter->index));
1689         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
1690
1691         rte_free(filter);
1692 }
1693
1694 /*
1695  * Add or delete a ntuple filter
1696  *
1697  * @param dev
1698  *  Pointer to struct rte_eth_dev.
1699  * @param ntuple_filter
1700  *  Pointer to struct rte_eth_ntuple_filter
1701  * @param add
1702  *  If true, add filter; if false, remove filter
1703  * @return
1704  *    - On success, zero.
1705  *    - On failure, a negative value.
1706  */
1707 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
1708                                 struct rte_eth_ntuple_filter *ntuple_filter,
1709                                 bool add)
1710 {
1711         struct hinic_filter_info *filter_info =
1712                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1713         struct hinic_5tuple_filter_info filter_5tuple;
1714         struct hinic_5tuple_filter *filter;
1715         int ret;
1716
1717         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
1718                 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
1719                 return -EINVAL;
1720         }
1721
1722         memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
1723         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
1724         if (ret < 0)
1725                 return ret;
1726
1727         filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
1728                                          &filter_5tuple);
1729         if (filter != NULL && add) {
1730                 PMD_DRV_LOG(ERR, "Filter exists.");
1731                 return -EEXIST;
1732         }
1733         if (filter == NULL && !add) {
1734                 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
1735                 return -ENOENT;
1736         }
1737
1738         if (add) {
1739                 filter = rte_zmalloc("hinic_5tuple_filter",
1740                                 sizeof(struct hinic_5tuple_filter), 0);
1741                 if (filter == NULL)
1742                         return -ENOMEM;
1743                 rte_memcpy(&filter->filter_info, &filter_5tuple,
1744                                 sizeof(struct hinic_5tuple_filter_info));
1745                 filter->queue = ntuple_filter->queue;
1746
1747                 filter_info->qid = ntuple_filter->queue;
1748
1749                 ret = hinic_add_5tuple_filter(dev, filter);
1750                 if (ret)
1751                         rte_free(filter);
1752
1753                 return ret;
1754         }
1755
1756         hinic_remove_5tuple_filter(dev, filter);
1757
1758         return 0;
1759 }
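/*
 * A hedged usage sketch for the entry point above (values are illustrative;
 * in this file the caller is hinic_flow_create() further below): install a
 * BGP filter (TCP, destination port 179) steering to queue 2, then remove
 * it again.
 *
 *     struct rte_eth_ntuple_filter nf;
 *
 *     memset(&nf, 0, sizeof(nf));
 *     nf.flags = RTE_5TUPLE_FLAGS;
 *     nf.proto = IPPROTO_TCP;
 *     nf.proto_mask = UINT8_MAX;
 *     nf.dst_port = RTE_BE16(BGP_DPORT_ID);
 *     nf.dst_port_mask = UINT16_MAX;
 *     nf.priority = HINIC_MIN_N_TUPLE_PRIO;
 *     nf.queue = 2;
 *
 *     if (hinic_add_del_ntuple_filter(dev, &nf, true) == 0)
 *             (void)hinic_add_del_ntuple_filter(dev, &nf, false);
 */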
1760
1761 static inline int
1762 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
1763 {
1764         if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
1765                 return -EINVAL;
1766
1767         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
1768                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
1769                 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
1770                         " ethertype filter", filter->ether_type);
1771                 return -EINVAL;
1772         }
1773
1774         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
1775                 PMD_DRV_LOG(ERR, "Mac compare is not supported");
1776                 return -EINVAL;
1777         }
1778         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1779                 PMD_DRV_LOG(ERR, "Drop option is not supported");
1780                 return -EINVAL;
1781         }
1782
1783         return 0;
1784 }
1785
1786 static inline int
1787 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
1788                               struct hinic_pkt_filter *ethertype_filter)
1789 {
1790         switch (ethertype_filter->pkt_proto) {
1791         case RTE_ETHER_TYPE_SLOW:
1792                 filter_info->pkt_type = PKT_LACP_TYPE;
1793                 break;
1794
1795         case RTE_ETHER_TYPE_ARP:
1796                 filter_info->pkt_type = PKT_ARP_TYPE;
1797                 break;
1798
1799         default:
1800                 PMD_DRV_LOG(ERR, "Only LACP/ARP are supported for ethertype filters");
1801                 return -EIO;
1802         }
1803
1804         return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1805 }
1806
1807 static inline int
1808 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
1809                               struct hinic_pkt_filter *ethertype_filter)
1810 {
1811         int id;
1812
1813         /* Find LACP or ARP type id */
1814         id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
1815         if (id < 0)
1816                 return -EINVAL;
1817
1818         if (!(filter_info->type_mask & (1 << id))) {
1819                 filter_info->type_mask |= 1 << id;
1820                 filter_info->pkt_filters[id].pkt_proto =
1821                         ethertype_filter->pkt_proto;
1822                 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
1823                 filter_info->qid = ethertype_filter->qid;
1824                 return id;
1825         }
1826
1827         PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
1828         return -EINVAL;
1829 }
1830
1831 static inline void
1832 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
1833                               uint8_t idx)
1834 {
1835         if (idx >= HINIC_MAX_Q_FILTERS)
1836                 return;
1837
1838         filter_info->pkt_type = 0;
1839         filter_info->type_mask &= ~(1 << idx);
1840         filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
1841         filter_info->pkt_filters[idx].enable = FALSE;
1842         filter_info->pkt_filters[idx].qid = 0;
1843 }
1844
1845 static inline int
1846 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
1847                                struct rte_eth_ethertype_filter *filter,
1848                                bool add)
1849 {
1850         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1851         struct hinic_filter_info *filter_info =
1852                 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1853         struct hinic_pkt_filter ethertype_filter;
1854         int i;
1855         int ret_fw;
1856
1857         if (hinic_check_ethertype_filter(filter))
1858                 return -EINVAL;
1859
1860         if (add) {
1861                 ethertype_filter.pkt_proto = filter->ether_type;
1862                 ethertype_filter.enable = TRUE;
1863                 ethertype_filter.qid = (u8)filter->queue;
1864                 i = hinic_ethertype_filter_insert(filter_info,
1865                                                     &ethertype_filter);
1866                 if (i < 0)
1867                         return -ENOSPC;
1868
1869                 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
1870                                 filter_info->pkt_type, filter_info->qid,
1871                                 filter_info->pkt_filters[i].enable, true);
1872                 if (ret_fw) {
1873                         PMD_DRV_LOG(ERR, "Add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1874                                 filter_info->pkt_type, filter->queue,
1875                                 filter_info->pkt_filters[i].enable);
1876
1877                         hinic_ethertype_filter_remove(filter_info, i);
1878                         return -ENOENT;
1879                 }
1880                 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1881                                 filter_info->pkt_type, filter->queue,
1882                                 filter_info->pkt_filters[i].enable);
1883
1884                 switch (ethertype_filter.pkt_proto) {
1885                 case RTE_ETHER_TYPE_SLOW:
1886                         ret_fw = hinic_set_lacp_tcam(nic_dev);
1887                         if (ret_fw) {
1888                                 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
1889                                 hinic_ethertype_filter_remove(filter_info, i);
1890                                 return -ENOENT;
1891                         }
1892
1893                         PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
1894                         break;
1895                 default:
1896                         break;
1897                 }
1898
1899         } else {
1900                 ethertype_filter.pkt_proto = filter->ether_type;
1901                 i = hinic_ethertype_filter_lookup(filter_info,
1902                                                 &ethertype_filter);
1903
1904                 if (i >= 0 && (filter_info->type_mask & (1 << i))) {
1905                         filter_info->pkt_filters[i].enable = FALSE;
1906                         (void)hinic_set_fdir_filter(nic_dev->hwdev,
1907                                         filter_info->pkt_type,
1908                                         filter_info->pkt_filters[i].qid,
1909                                         filter_info->pkt_filters[i].enable,
1910                                         true);
1911
1912                         PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1913                                         filter_info->pkt_type,
1914                                         filter_info->pkt_filters[i].qid,
1915                                         filter_info->pkt_filters[i].enable);
1916
1917                         switch (ethertype_filter.pkt_proto) {
1918                         case RTE_ETHER_TYPE_SLOW:
1919                                 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
1920                                                                 TCAM_PKT_LACP);
1921                                 PMD_DRV_LOG(INFO,
1922                                         "Del lacp tcam succeed");
1923                                 break;
1924                         default:
1925                                 break;
1926                         }
1927
1928                         hinic_ethertype_filter_remove(filter_info, i);
1929
1930                 } else {
1931                         PMD_DRV_LOG(ERR, "Ethertype filter doesn't exist, "
1932                                         "type: 0x%x, qid: 0x%x",
1933                                         filter_info->pkt_type, filter->queue);
1934                         return -ENOENT;
1935                 }
1936         }
1937
1938         return 0;
1939 }
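/*
 * A hedged usage sketch for the ethertype path above: steer LACP frames
 * (ethertype 0x8809, RTE_ETHER_TYPE_SLOW) to queue 0. A successful add
 * both enables the per-type packet filter and programs the LACP TCAM
 * entry; the queue choice is illustrative.
 *
 *     struct rte_eth_ethertype_filter ef;
 *
 *     memset(&ef, 0, sizeof(ef));
 *     ef.ether_type = RTE_ETHER_TYPE_SLOW;
 *     ef.flags = 0;        // MAC compare and drop are rejected above
 *     ef.queue = 0;
 *
 *     if (hinic_add_del_ethertype_filter(dev, &ef, true) == 0)
 *             (void)hinic_add_del_ethertype_filter(dev, &ef, false);
 */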
1940
1941 static int
1942 hinic_fdir_info_init(struct hinic_fdir_rule *rule,
1943                      struct hinic_fdir_info *fdir_info)
1944 {
1945         switch (rule->mask.src_ipv4_mask) {
1946         case UINT32_MAX:
1947                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
1948                 fdir_info->qid = rule->queue;
1949                 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
1950                 return 0;
1951
1952         case 0:
1953                 break;
1954
1955         default:
1956                 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1957                 return -EINVAL;
1958         }
1959
1960         switch (rule->mask.dst_ipv4_mask) {
1961         case UINT32_MAX:
1962                 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
1963                 fdir_info->qid = rule->queue;
1964                 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
1965                 return 0;
1966
1967         case 0:
1968                 break;
1969
1970         default:
1971                 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1972                 return -EINVAL;
1973         }
1974
1975         if (fdir_info->fdir_flag == 0) {
1976                 PMD_DRV_LOG(ERR, "No supported src_ip or dst_ip mask is set.");
1977                 return -EINVAL;
1978         }
1979
1980         return 0;
1981 }
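/*
 * hinic_fdir_info_init() accepts exactly one fully-masked IPv4 address:
 * a UINT32_MAX src mask selects HINIC_ATR_FLOW_TYPE_IPV4_SIP, a UINT32_MAX
 * dst mask selects HINIC_ATR_FLOW_TYPE_IPV4_DIP, and any partial mask is
 * rejected. An illustrative sketch (address and queue invented; RTE_IPV4()
 * comes from rte_ip.h):
 *
 *     struct hinic_fdir_rule rule;
 *     struct hinic_fdir_info info;
 *
 *     memset(&rule, 0, sizeof(rule));
 *     rule.mask.dst_ipv4_mask = UINT32_MAX;   // match dst IP exactly
 *     rule.hinic_fdir.dst_ip = RTE_IPV4(192, 168, 0, 1);
 *     rule.queue = 3;
 *
 *     memset(&info, 0, sizeof(info));
 *     if (hinic_fdir_info_init(&rule, &info) == 0) {
 *             // info.fdir_flag == HINIC_ATR_FLOW_TYPE_IPV4_DIP,
 *             // info.qid == 3, info.fdir_key == rule.hinic_fdir.dst_ip
 *     }
 */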
1982
1983 static inline int
1984 hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
1985                           struct hinic_fdir_rule *rule,
1986                           bool add)
1987 {
1988         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1989         struct hinic_fdir_info fdir_info;
1990         int ret;
1991
1992         memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
1993
1994         ret = hinic_fdir_info_init(rule, &fdir_info);
1995         if (ret) {
1996                 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
1997                 return ret;
1998         }
1999
2000         if (add) {
2001                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2002                                                 true, fdir_info.fdir_key,
2003                                                 true, fdir_info.fdir_flag);
2004                 if (ret) {
2005                         PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2006                                         fdir_info.fdir_flag, fdir_info.qid,
2007                                         fdir_info.fdir_key);
2008                         return -ENOENT;
2009                 }
2010                 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2011                                 fdir_info.fdir_flag, fdir_info.qid,
2012                                 fdir_info.fdir_key);
2013         } else {
2014                 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2015                                                 false, fdir_info.fdir_key, true,
2016                                                 fdir_info.fdir_flag);
2017                 if (ret) {
2018                         PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2019                                 fdir_info.fdir_flag, fdir_info.qid,
2020                                 fdir_info.fdir_key);
2021                         return -ENOENT;
2022                 }
2023                 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2024                                 fdir_info.fdir_flag, fdir_info.qid,
2025                                 fdir_info.fdir_key);
2026         }
2027
2028         return 0;
2029 }
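/*
 * Note that add and delete above differ only in one boolean passed to
 * hinic_set_normal_filter(); the parameter names below are an assumption,
 * inferred from the call sites rather than from the base headers:
 *
 *     // add: enable the key on the queue
 *     hinic_set_normal_filter(hwdev, qid, true, key, true, flag);
 *     // del: same key and flow type, enable bit cleared
 *     hinic_set_normal_filter(hwdev, qid, false, key, true, flag);
 */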
2030
2031 /**
2032  * Create or destroy a flow rule.
2033  * Theoretically one rule can match more than one filter.
2034  * We let it use the first filter it hits, so the
2035  * sequence of the parsers below matters.
2036  */
2037 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2038                                         const struct rte_flow_attr *attr,
2039                                         const struct rte_flow_item pattern[],
2040                                         const struct rte_flow_action actions[],
2041                                         struct rte_flow_error *error)
2042 {
2043         int ret;
2044         struct rte_eth_ntuple_filter ntuple_filter;
2045         struct rte_eth_ethertype_filter ethertype_filter;
2046         struct hinic_fdir_rule fdir_rule;
2047         struct rte_flow *flow = NULL;
2048         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2049         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2050         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2051         struct hinic_flow_mem *hinic_flow_mem_ptr;
2052         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2053
2054         flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2055         if (!flow) {
2056                 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2057                 return NULL;
2058         }
2059
2060         hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2061                         sizeof(struct hinic_flow_mem), 0);
2062         if (!hinic_flow_mem_ptr) {
2063                 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2064                 rte_free(flow);
2065                 return NULL;
2066         }
2067
2068         hinic_flow_mem_ptr->flow = flow;
2069         TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2070                                 entries);
2071
2072         /* Add ntuple filter */
2073         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2074         ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2075                         actions, &ntuple_filter, error);
2076         if (!ret) {
2077                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2078                 if (!ret) {
2079                         ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2080                                 sizeof(struct hinic_ntuple_filter_ele), 0);
                             if (ntuple_filter_ptr == NULL) {
                                     (void)hinic_add_del_ntuple_filter(dev,
                                             &ntuple_filter, FALSE);
                                     ret = -ENOMEM;
                                     goto out;
                             }
2081                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2082                                    &ntuple_filter,
2083                                    sizeof(struct rte_eth_ntuple_filter));
2084                         TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2085                                 ntuple_filter_ptr, entries);
2086                         flow->rule = ntuple_filter_ptr;
2087                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2088
2089                         PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
2090                                 hinic_global_func_id(nic_dev->hwdev));
2091                         return flow;
2092                 }
2093                 goto out;
2094         }
2095
2096         /* Add ethertype filter */
2097         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2098         ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
2099                                         &ethertype_filter, error);
2100         if (!ret) {
2101                 ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
2102                                                      TRUE);
2103                 if (!ret) {
2104                         ethertype_filter_ptr =
2105                                 rte_zmalloc("hinic_ethertype_filter",
2106                                 sizeof(struct hinic_ethertype_filter_ele), 0);
                             if (ethertype_filter_ptr == NULL) {
                                     (void)hinic_add_del_ethertype_filter(dev,
                                             &ethertype_filter, FALSE);
                                     ret = -ENOMEM;
                                     goto out;
                             }
2107                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2108                                 &ethertype_filter,
2109                                 sizeof(struct rte_eth_ethertype_filter));
2110                         TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
2111                                 ethertype_filter_ptr, entries);
2112                         flow->rule = ethertype_filter_ptr;
2113                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2114
2115                         PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
2116                                         hinic_global_func_id(nic_dev->hwdev));
2117                         return flow;
2118                 }
2119                 goto out;
2120         }
2121
2122         /* Add fdir filter */
2123         memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
2124         ret = hinic_parse_fdir_filter(dev, attr, pattern,
2125                                       actions, &fdir_rule, error);
2126         if (!ret) {
2127                 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, TRUE);
2128                 if (!ret) {
2129                         fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
2130                                 sizeof(struct hinic_fdir_rule_ele), 0);
                             if (fdir_rule_ptr == NULL) {
                                     (void)hinic_add_del_fdir_filter(dev,
                                             &fdir_rule, FALSE);
                                     ret = -ENOMEM;
                                     goto out;
                             }
2131                         rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
2132                                 sizeof(struct hinic_fdir_rule));
2133                         TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
2134                                 fdir_rule_ptr, entries);
2135                         flow->rule = fdir_rule_ptr;
2136                         flow->filter_type = RTE_ETH_FILTER_FDIR;
2137
2138                         PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id: 0x%x",
2139                                         hinic_global_func_id(nic_dev->hwdev));
2140                         return flow;
2141                 }
2142                 goto out;
2143         }
2144
2145 out:
2146         TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
2147         rte_flow_error_set(error, -ret,
2148                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2149                            "Failed to create flow.");
2150         rte_free(hinic_flow_mem_ptr);
2151         rte_free(flow);
2152         return NULL;
2153 }
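/*
 * From an application's point of view the chain above is reached through
 * the generic rte_flow API. A hedged example that should land in the fdir
 * parser (destination-IP match redirected to a queue; addresses, queue and
 * port_id are illustrative, and RTE_IPV4() comes from rte_ip.h):
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item_ipv4 ipv4_spec = {
 *             .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *     };
 *     struct rte_flow_item_ipv4 ipv4_mask = {
 *             .hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ipv4_spec, .mask = &ipv4_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 3 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (f != NULL)
 *             (void)rte_flow_destroy(port_id, f, &err);
 */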
2154
2155 /* Destroy a flow rule on hinic. */
2156 static int hinic_flow_destroy(struct rte_eth_dev *dev,
2157                               struct rte_flow *flow,
2158                               struct rte_flow_error *error)
2159 {
2160         int ret;
2161         struct rte_flow *pmd_flow = flow;
2162         enum rte_filter_type filter_type = pmd_flow->filter_type;
2163         struct rte_eth_ntuple_filter ntuple_filter;
2164         struct rte_eth_ethertype_filter ethertype_filter;
2165         struct hinic_fdir_rule fdir_rule;
2166         struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2167         struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2168         struct hinic_fdir_rule_ele *fdir_rule_ptr;
2169         struct hinic_flow_mem *hinic_flow_mem_ptr;
2170         struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2171
2172         switch (filter_type) {
2173         case RTE_ETH_FILTER_NTUPLE:
2174                 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
2175                                         pmd_flow->rule;
2176                 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
2177                         sizeof(struct rte_eth_ntuple_filter));
2178                 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2179                 if (!ret) {
2180                         TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
2181                                 ntuple_filter_ptr, entries);
2182                         rte_free(ntuple_filter_ptr);
2183                 }
2184                 break;
2185         case RTE_ETH_FILTER_ETHERTYPE:
2186                 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
2187                                         pmd_flow->rule;
2188                 rte_memcpy(&ethertype_filter,
2189                         &ethertype_filter_ptr->filter_info,
2190                         sizeof(struct rte_eth_ethertype_filter));
2191                 ret = hinic_add_del_ethertype_filter(dev,
2192                                 &ethertype_filter, FALSE);
2193                 if (!ret) {
2194                         TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
2195                                 ethertype_filter_ptr, entries);
2196                         rte_free(ethertype_filter_ptr);
2197                 }
2198                 break;
2199         case RTE_ETH_FILTER_FDIR:
2200                 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
2201                 rte_memcpy(&fdir_rule,
2202                         &fdir_rule_ptr->filter_info,
2203                         sizeof(struct hinic_fdir_rule));
2204                 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
2205                 if (!ret) {
2206                         TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
2207                                 fdir_rule_ptr, entries);
2208                         rte_free(fdir_rule_ptr);
2209                 }
2210                 break;
2211         default:
2212                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2213                         filter_type);
2214                 ret = -EINVAL;
2215                 break;
2216         }
2217
2218         if (ret) {
2219                 rte_flow_error_set(error, EINVAL,
2220                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2221                                 NULL, "Failed to destroy flow");
2222                 return ret;
2223         }
2224
2225         TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
2226                 if (hinic_flow_mem_ptr->flow == pmd_flow) {
2227                         TAILQ_REMOVE(&nic_dev->hinic_flow_list,
2228                                 hinic_flow_mem_ptr, entries);
2229                         rte_free(hinic_flow_mem_ptr);
2230                         break;
2231                 }
2232         }
2233         rte_free(flow);
2234
2235         PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
2236                         hinic_global_func_id(nic_dev->hwdev));
2237
2238         return ret;
2239 }
2240
2241 const struct rte_flow_ops hinic_flow_ops = {
2242         .validate = hinic_flow_validate,
2243         .create = hinic_flow_create,
2244         .destroy = hinic_flow_destroy,
2245 };
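/*
 * These ops are handed to applications through the ethdev filter hook; in
 * the hinic PMD of this era that is the .filter_ctrl callback answering
 * RTE_ETH_FILTER_GENERIC. A rough sketch of the conventional shape (the
 * real callback lives outside this file, so treat this as an assumption):
 *
 *     static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,
 *                                      enum rte_filter_type filter_type,
 *                                      enum rte_filter_op filter_op,
 *                                      void *arg)
 *     {
 *             if (filter_type != RTE_ETH_FILTER_GENERIC)
 *                     return -EINVAL;
 *             if (filter_op != RTE_ETH_FILTER_GET)
 *                     return -EINVAL;
 *             *(const void **)arg = &hinic_flow_ops;
 *             return 0;
 *     }
 */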