/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <sys/queue.h>
#include <rte_bus_pci.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7
#define TXGBE_MAX_FLX_SOURCE_OFF 62

/**
 * An endless loop cannot happen under the following assumptions:
 * 1. there is at least one non-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
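
/**
 * Illustrative sketch (not part of the driver): how the helpers above
 * are typically used to walk a pattern, skipping VOID entries. The
 * loop body is hypothetical.
 *
 *	const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *	while (it->type != RTE_FLOW_ITEM_TYPE_END) {
 *		// ... handle *it ...
 *		it = next_no_void_pattern(pattern, it);
 *	}
 */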

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern describes packets, it normally follows
 * network order.
 */
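
/**
 * A minimal sketch of that convention, assuming an application builds
 * the items by hand (values are examples only): spec/mask fields are
 * network order, attr/action fields stay in CPU order.
 *
 *	struct rte_flow_item_ipv4 spec = { 0 }, mask = { 0 };
 *	spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
 *	mask.hdr.dst_addr = UINT32_MAX;                  // big endian
 *	struct rte_flow_action_queue q = { .index = 3 }; // CPU order
 */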

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
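
/**
 * Illustrative only: one way an application might express the rule
 * documented above (example addresses and ports, error handling
 * omitted).
 *
 *	struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *	struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */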
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
			  EINVAL,
			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			  item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or VLAN */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			  item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->mask) {
		/* get the IPv4 info */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		ipv4_mask = item->mask;
		/**
		 * Only support src & dst addresses, protocol,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((ipv4_mask->hdr.src_addr != 0 &&
			ipv4_mask->hdr.src_addr != UINT32_MAX) ||
			(ipv4_mask->hdr.dst_addr != 0 &&
			ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
			(ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
			ipv4_mask->hdr.next_proto_id != 0)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

		ipv4_spec = item->spec;
		filter->dst_ip = ipv4_spec->hdr.dst_addr;
		filter->src_ip = ipv4_spec->hdr.src_addr;
		filter->proto  = ipv4_spec->hdr.next_proto_id;
	}

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((tcp_mask->hdr.src_port != 0 &&
			tcp_mask->hdr.src_port != UINT16_MAX) ||
			(tcp_mask->hdr.dst_port != 0 &&
			tcp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
		filter->src_port_mask  = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
			udp_mask->hdr.src_port != UINT16_MAX) ||
			(udp_mask->hdr.dst_port != 0 &&
			udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				   attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
		attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}

/* a txgbe-specific function because the flags are specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
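
/**
 * Usage sketch, assuming the pattern[] array from the n-tuple example
 * above and an application-side port_id: the parsers in this file run
 * under rte_flow_validate()/rte_flow_create().
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_action_queue q = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */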

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info as well.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
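
/**
 * Illustrative only: an ethertype rule matching the grammar above,
 * reusing the 0x0807 example value from the comment.
 *
 *	struct rte_flow_item_eth eth_spec = { 0 }, eth_mask = { 0 };
 *	eth_spec.type = rte_cpu_to_be_16(0x0807);
 *	eth_mask.type = UINT16_MAX;
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */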
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info as well.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
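
/**
 * Illustrative only: a TCP SYN rule per the grammar above. The parser
 * below requires the tcp_flags mask to be exactly RTE_TCP_SYN_FLAG.
 *
 *	struct rte_flow_item_tcp tcp_spec = { 0 }, tcp_mask = { 0 };
 *	tcp_spec.hdr.tcp_flags = RTE_TCP_SYN_FLAG;
 *	tcp_mask.hdr.tcp_flags = RTE_TCP_SYN_FLAG;
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */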
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_syn_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_syn_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* the queue index is only meaningful after a successful parse */
	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info as well.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
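
/**
 * Illustrative only, assuming the rte_flow E-tag layout where the
 * 2-bit GRP sits above the 12-bit e_cid_base inside rsvd_grp_ecid_b
 * (big endian); values match the comment above.
 *
 *	struct rte_flow_item_e_tag spec = { 0 }, mask = { 0 };
 *	spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
 *	mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &spec, .mask = &mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */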
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = item->spec;
	e_tag_mask = item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
			act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}
1227
1228 /* Parse to get the attr and action info of flow director rule. */
1229 static int
1230 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1231                           const struct rte_flow_action actions[],
1232                           struct txgbe_fdir_rule *rule,
1233                           struct rte_flow_error *error)
1234 {
1235         const struct rte_flow_action *act;
1236         const struct rte_flow_action_queue *act_q;
1237         const struct rte_flow_action_mark *mark;
1238
1239         /* parse attr */
1240         /* must be input direction */
1241         if (!attr->ingress) {
1242                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1243                 rte_flow_error_set(error, EINVAL,
1244                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1245                         attr, "Only support ingress.");
1246                 return -rte_errno;
1247         }
1248
1249         /* not supported */
1250         if (attr->egress) {
1251                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1252                 rte_flow_error_set(error, EINVAL,
1253                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1254                         attr, "Not support egress.");
1255                 return -rte_errno;
1256         }
1257
1258         /* not supported */
1259         if (attr->transfer) {
1260                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1261                 rte_flow_error_set(error, EINVAL,
1262                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1263                         attr, "No support for transfer.");
1264                 return -rte_errno;
1265         }
1266
1267         /* not supported */
1268         if (attr->priority) {
1269                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1270                 rte_flow_error_set(error, EINVAL,
1271                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1272                         attr, "Not support priority.");
1273                 return -rte_errno;
1274         }
1275
1276         /* check if the first not void action is QUEUE or DROP. */
1277         act = next_no_void_action(actions, NULL);
1278         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1279             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1280                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1281                 rte_flow_error_set(error, EINVAL,
1282                         RTE_FLOW_ERROR_TYPE_ACTION,
1283                         act, "Not supported action.");
1284                 return -rte_errno;
1285         }
1286
1287         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1288                 act_q = (const struct rte_flow_action_queue *)act->conf;
1289                 rule->queue = act_q->index;
1290         } else { /* drop */
1291                 /* signature mode does not support drop action. */
1292                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1293                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1294                         rte_flow_error_set(error, EINVAL,
1295                                 RTE_FLOW_ERROR_TYPE_ACTION,
1296                                 act, "Not supported action.");
1297                         return -rte_errno;
1298                 }
1299                 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1300         }
1301
1302         /* check if the next not void action is MARK or END */
1303         act = next_no_void_action(actions, act);
1304         if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1305                 act->type != RTE_FLOW_ACTION_TYPE_END) {
1306                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1307                 rte_flow_error_set(error, EINVAL,
1308                         RTE_FLOW_ERROR_TYPE_ACTION,
1309                         act, "Not supported action.");
1310                 return -rte_errno;
1311         }
1312
1313         rule->soft_id = 0;
1314
1315         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1316                 mark = (const struct rte_flow_action_mark *)act->conf;
1317                 rule->soft_id = mark->id;
1318                 act = next_no_void_action(actions, act);
1319         }
1320
1321         /* check if the next not void action is END */
1322         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1323                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1324                 rte_flow_error_set(error, EINVAL,
1325                         RTE_FLOW_ERROR_TYPE_ACTION,
1326                         act, "Not supported action.");
1327                 return -rte_errno;
1328         }
1329
1330         return 0;
1331 }
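/*
 * For reference only (not driver code): a minimal sketch of an action
 * list this parser accepts, assuming queue index 1 and mark id 3 are
 * valid for the port.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */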
1332
1333 /* Search for the next not void pattern item, skipping FUZZY items. */
1334 static inline
1335 const struct rte_flow_item *next_no_fuzzy_pattern(
1336                 const struct rte_flow_item pattern[],
1337                 const struct rte_flow_item *cur)
1338 {
1339         const struct rte_flow_item *next =
1340                 next_no_void_pattern(pattern, cur);
1341         while (1) {
1342                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1343                         return next;
1344                 next = next_no_void_pattern(pattern, next);
1345         }
1346 }
1347
1348 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1349 {
1350         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1351         const struct rte_flow_item *item;
1352         uint32_t sh, lh, mh;
1353         int i = 0;
1354
1355         while (1) {
1356                 item = pattern + i;
1357                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1358                         break;
1359
1360                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1361                         spec = item->spec;
1362                         last = item->last;
1363                         mask = item->mask;
1364
1365                         if (!spec || !mask)
1366                                 return 0;
1367
1368                         sh = spec->thresh;
1369
1370                         if (!last)
1371                                 lh = sh;
1372                         else
1373                                 lh = last->thresh;
1374
1375                         mh = mask->thresh;
1376                         sh = sh & mh;
1377                         lh = lh & mh;
1378
1379                         if (!sh || sh > lh)
1380                                 return 0;
1381
1382                         return 1;
1383                 }
1384
1385                 i++;
1386         }
1387
1388         return 0;
1389 }
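/*
 * Illustrative sketch (not driver code): a FUZZY item that makes
 * signature_match() return 1. With no "last", lh equals sh, and with a
 * full threshold mask the masked spec threshold stays non-zero.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *	struct rte_flow_item fuzzy = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */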
1390
1391 /**
1392  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1393  * and collect the flow director filter info along the way.
1394  * UDP/TCP/SCTP PATTERN:
1395  * The first not void item can be ETH or IPV4 or IPV6
1396  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1397  * The next not void item could be UDP or TCP or SCTP (optional)
1398  * The next not void item could be RAW (for flexbyte, optional)
1399  * The next not void item must be END.
1400  * A Fuzzy Match pattern can appear at any place before END.
1401  * Fuzzy Match is optional for IPV4 but is required for IPV6
1402  * MAC VLAN PATTERN:
1403  * The first not void item must be ETH.
1404  * The second not void item must be MAC VLAN.
1405  * The next not void item must be END.
1406  * ACTION:
1407  * The first not void action should be QUEUE or DROP.
1408  * The second not void optional action should be MARK,
1409  * mark_id is a uint32_t number.
1410  * The next not void action should be END.
1411  * UDP/TCP/SCTP pattern example:
1412  * ITEM         Spec                    Mask
1413  * ETH          NULL                    NULL
1414  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1415  *              dst_addr 192.167.3.50   0xFFFFFFFF
1416  * UDP/TCP/SCTP src_port        80      0xFFFF
1417  *              dst_port        80      0xFFFF
1418  * FLEX relative        0       0x1
1419  *              search          0       0x1
1420  *              reserved        0       0
1421  *              offset          12      0xFFFFFFFF
1422  *              limit           0       0xFFFF
1423  *              length          2       0xFFFF
1424  *              pattern[0]      0x86    0xFF
1425  *              pattern[1]      0xDD    0xFF
1426  * END
1427  * MAC VLAN pattern example:
1428  * ITEM         Spec                    Mask
1429  * ETH          dst_addr
1430  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1431  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1432  * MAC VLAN     tci     0x2016          0xEFFF
1433  * END
1434  * Other members in mask and spec should be set to 0x00.
1435  * Item->last should be NULL.
1436  */
1437 static int
1438 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1439                                const struct rte_flow_attr *attr,
1440                                const struct rte_flow_item pattern[],
1441                                const struct rte_flow_action actions[],
1442                                struct txgbe_fdir_rule *rule,
1443                                struct rte_flow_error *error)
1444 {
1445         const struct rte_flow_item *item;
1446         const struct rte_flow_item_eth *eth_mask;
1447         const struct rte_flow_item_ipv4 *ipv4_spec;
1448         const struct rte_flow_item_ipv4 *ipv4_mask;
1449         const struct rte_flow_item_ipv6 *ipv6_spec;
1450         const struct rte_flow_item_ipv6 *ipv6_mask;
1451         const struct rte_flow_item_tcp *tcp_spec;
1452         const struct rte_flow_item_tcp *tcp_mask;
1453         const struct rte_flow_item_udp *udp_spec;
1454         const struct rte_flow_item_udp *udp_mask;
1455         const struct rte_flow_item_sctp *sctp_spec;
1456         const struct rte_flow_item_sctp *sctp_mask;
1457         const struct rte_flow_item_raw *raw_mask;
1458         const struct rte_flow_item_raw *raw_spec;
1459         u32 ptype = 0;
1460         uint8_t j;
1461
1462         if (!pattern) {
1463                 rte_flow_error_set(error, EINVAL,
1464                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1465                         NULL, "NULL pattern.");
1466                 return -rte_errno;
1467         }
1468
1469         if (!actions) {
1470                 rte_flow_error_set(error, EINVAL,
1471                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1472                                    NULL, "NULL action.");
1473                 return -rte_errno;
1474         }
1475
1476         if (!attr) {
1477                 rte_flow_error_set(error, EINVAL,
1478                                    RTE_FLOW_ERROR_TYPE_ATTR,
1479                                    NULL, "NULL attribute.");
1480                 return -rte_errno;
1481         }
1482
1483         /**
1484          * Some fields may not be provided. Set spec to 0 and mask to default
1485          * value, so nothing needs to be done later for the fields not provided.
1486          */
1487         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1488         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1489         rule->mask.vlan_tci_mask = 0;
1490         rule->mask.flex_bytes_mask = 0;
1491
1492         /**
1493          * The first not void item should be
1494          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1495          */
1496         item = next_no_fuzzy_pattern(pattern, NULL);
1497         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1498             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1499             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1500             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1501             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1502             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1503                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1504                 rte_flow_error_set(error, EINVAL,
1505                         RTE_FLOW_ERROR_TYPE_ITEM,
1506                         item, "Not supported by fdir filter");
1507                 return -rte_errno;
1508         }
1509
1510         if (signature_match(pattern))
1511                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1512         else
1513                 rule->mode = RTE_FDIR_MODE_PERFECT;
1514
1515         /*Not supported last point for range*/
1516         if (item->last) {
1517                 rte_flow_error_set(error, EINVAL,
1518                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1519                         item, "Not supported last point for range");
1520                 return -rte_errno;
1521         }
1522
1523         /* Get the MAC info. */
1524         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1525                 /**
1526                  * Only support vlan and dst MAC address,
1527                  * others should be masked.
1528                  */
1529                 if (item->spec && !item->mask) {
1530                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1531                         rte_flow_error_set(error, EINVAL,
1532                                 RTE_FLOW_ERROR_TYPE_ITEM,
1533                                 item, "Not supported by fdir filter");
1534                         return -rte_errno;
1535                 }
1536
1537                 if (item->mask) {
1538                         rule->b_mask = TRUE;
1539                         eth_mask = item->mask;
1540
1541                         /* Ether type should be masked. */
1542                         if (eth_mask->type ||
1543                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1544                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1545                                 rte_flow_error_set(error, EINVAL,
1546                                         RTE_FLOW_ERROR_TYPE_ITEM,
1547                                         item, "Not supported by fdir filter");
1548                                 return -rte_errno;
1549                         }
1550
1551                         /* A non-empty Ethernet mask means MAC VLAN mode. */
1552                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1553
1554                         /**
1555                          * The src MAC address mask must be all zero (src is not
1556                          * matched); the dst MAC address must be matched exactly.
1557                          */
1558                         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1559                                 if (eth_mask->src.addr_bytes[j] ||
1560                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1561                                         memset(rule, 0,
1562                                         sizeof(struct txgbe_fdir_rule));
1563                                         rte_flow_error_set(error, EINVAL,
1564                                         RTE_FLOW_ERROR_TYPE_ITEM,
1565                                         item, "Not supported by fdir filter");
1566                                         return -rte_errno;
1567                                 }
1568                         }
1569
1570                         /* When no VLAN, considered as full mask. */
1571                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1572                 }
1573                 /* If both spec and mask are NULL,
1574                  * it means we don't care about ETH.
1575                  * Do nothing.
1576                  */
1577
1578                 /**
1579                  * Check if the next not void item is vlan or ipv4.
1580                  * IPv6 is not supported.
1581                  */
1582                 item = next_no_fuzzy_pattern(pattern, item);
1583                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1584                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1585                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1586                                 rte_flow_error_set(error, EINVAL,
1587                                         RTE_FLOW_ERROR_TYPE_ITEM,
1588                                         item, "Not supported by fdir filter");
1589                                 return -rte_errno;
1590                         }
1591                 } else {
1592                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1593                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1594                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1595                                 rte_flow_error_set(error, EINVAL,
1596                                         RTE_FLOW_ERROR_TYPE_ITEM,
1597                                         item, "Not supported by fdir filter");
1598                                 return -rte_errno;
1599                         }
1600                 }
1601         }
1602
1603         /* Get the IPV4 info. */
1604         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1605                 /**
1606                  * Set the flow type even if there's no content
1607                  * as we must have a flow type.
1608                  */
1609                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1610                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1611                 /*Not supported last point for range*/
1612                 if (item->last) {
1613                         rte_flow_error_set(error, EINVAL,
1614                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1615                                 item, "Not supported last point for range");
1616                         return -rte_errno;
1617                 }
1618                 /**
1619                  * Only care about src & dst addresses,
1620                  * others should be masked.
1621                  */
1622                 if (!item->mask) {
1623                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1624                         rte_flow_error_set(error, EINVAL,
1625                                 RTE_FLOW_ERROR_TYPE_ITEM,
1626                                 item, "Not supported by fdir filter");
1627                         return -rte_errno;
1628                 }
1629                 rule->b_mask = TRUE;
1630                 ipv4_mask = item->mask;
1631                 if (ipv4_mask->hdr.version_ihl ||
1632                     ipv4_mask->hdr.type_of_service ||
1633                     ipv4_mask->hdr.total_length ||
1634                     ipv4_mask->hdr.packet_id ||
1635                     ipv4_mask->hdr.fragment_offset ||
1636                     ipv4_mask->hdr.time_to_live ||
1637                     ipv4_mask->hdr.next_proto_id ||
1638                     ipv4_mask->hdr.hdr_checksum) {
1639                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1640                         rte_flow_error_set(error, EINVAL,
1641                                 RTE_FLOW_ERROR_TYPE_ITEM,
1642                                 item, "Not supported by fdir filter");
1643                         return -rte_errno;
1644                 }
1645                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1646                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1647
1648                 if (item->spec) {
1649                         rule->b_spec = TRUE;
1650                         ipv4_spec = item->spec;
1651                         rule->input.dst_ip[0] =
1652                                 ipv4_spec->hdr.dst_addr;
1653                         rule->input.src_ip[0] =
1654                                 ipv4_spec->hdr.src_addr;
1655                 }
1656
1657                 /**
1658                  * Check if the next not void item is
1659                  * TCP or UDP or SCTP or END.
1660                  */
1661                 item = next_no_fuzzy_pattern(pattern, item);
1662                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1663                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1664                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1665                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1666                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1667                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1668                         rte_flow_error_set(error, EINVAL,
1669                                 RTE_FLOW_ERROR_TYPE_ITEM,
1670                                 item, "Not supported by fdir filter");
1671                         return -rte_errno;
1672                 }
1673         }
1674
1675         /* Get the IPV6 info. */
1676         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1677                 /**
1678                  * Set the flow type even if there's no content
1679                  * as we must have a flow type.
1680                  */
1681                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1682                 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1683
1684                 /**
1685                  * 1. must be in signature match mode
1686                  * 2. "last" is not supported
1687                  * 3. mask must not be NULL
1688                  */
1689                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1690                     item->last ||
1691                     !item->mask) {
1692                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1693                         rte_flow_error_set(error, EINVAL,
1694                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1695                                 item, "Not supported last point for range");
1696                         return -rte_errno;
1697                 }
1698
1699                 rule->b_mask = TRUE;
1700                 ipv6_mask = item->mask;
1701                 if (ipv6_mask->hdr.vtc_flow ||
1702                     ipv6_mask->hdr.payload_len ||
1703                     ipv6_mask->hdr.proto ||
1704                     ipv6_mask->hdr.hop_limits) {
1705                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1706                         rte_flow_error_set(error, EINVAL,
1707                                 RTE_FLOW_ERROR_TYPE_ITEM,
1708                                 item, "Not supported by fdir filter");
1709                         return -rte_errno;
1710                 }
1711
1712                 /* check src addr mask */
1713                 for (j = 0; j < 16; j++) {
1714                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1715                                 rule->mask.src_ipv6_mask |= 1 << j;
1716                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1717                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1718                                 rte_flow_error_set(error, EINVAL,
1719                                         RTE_FLOW_ERROR_TYPE_ITEM,
1720                                         item, "Not supported by fdir filter");
1721                                 return -rte_errno;
1722                         }
1723                 }
1724
1725                 /* check dst addr mask */
1726                 for (j = 0; j < 16; j++) {
1727                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1728                                 rule->mask.dst_ipv6_mask |= 1 << j;
1729                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1730                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1731                                 rte_flow_error_set(error, EINVAL,
1732                                         RTE_FLOW_ERROR_TYPE_ITEM,
1733                                         item, "Not supported by fdir filter");
1734                                 return -rte_errno;
1735                         }
1736                 }
1737
1738                 if (item->spec) {
1739                         rule->b_spec = TRUE;
1740                         ipv6_spec = item->spec;
1741                         rte_memcpy(rule->input.src_ip,
1742                                    ipv6_spec->hdr.src_addr, 16);
1743                         rte_memcpy(rule->input.dst_ip,
1744                                    ipv6_spec->hdr.dst_addr, 16);
1745                 }
1746
1747                 /**
1748                  * Check if the next not void item is
1749                  * TCP or UDP or SCTP or END.
1750                  */
1751                 item = next_no_fuzzy_pattern(pattern, item);
1752                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1753                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1754                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1755                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1756                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1757                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1758                         rte_flow_error_set(error, EINVAL,
1759                                 RTE_FLOW_ERROR_TYPE_ITEM,
1760                                 item, "Not supported by fdir filter");
1761                         return -rte_errno;
1762                 }
1763         }
1764
1765         /* Get the TCP info. */
1766         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1767                 /**
1768                  * Set the flow type even if there's no content
1769                  * as we must have a flow type.
1770                  */
1771                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1772                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1773                 /*Not supported last point for range*/
1774                 if (item->last) {
1775                         rte_flow_error_set(error, EINVAL,
1776                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1777                                 item, "Not supported last point for range");
1778                         return -rte_errno;
1779                 }
1780                 /**
1781                  * Only care about src & dst ports,
1782                  * others should be masked.
1783                  */
1784                 if (!item->mask) {
1785                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1786                         rte_flow_error_set(error, EINVAL,
1787                                 RTE_FLOW_ERROR_TYPE_ITEM,
1788                                 item, "Not supported by fdir filter");
1789                         return -rte_errno;
1790                 }
1791                 rule->b_mask = TRUE;
1792                 tcp_mask = item->mask;
1793                 if (tcp_mask->hdr.sent_seq ||
1794                     tcp_mask->hdr.recv_ack ||
1795                     tcp_mask->hdr.data_off ||
1796                     tcp_mask->hdr.tcp_flags ||
1797                     tcp_mask->hdr.rx_win ||
1798                     tcp_mask->hdr.cksum ||
1799                     tcp_mask->hdr.tcp_urp) {
1800                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1801                         rte_flow_error_set(error, EINVAL,
1802                                 RTE_FLOW_ERROR_TYPE_ITEM,
1803                                 item, "Not supported by fdir filter");
1804                         return -rte_errno;
1805                 }
1806                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1807                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1808
1809                 if (item->spec) {
1810                         rule->b_spec = TRUE;
1811                         tcp_spec = item->spec;
1812                         rule->input.src_port =
1813                                 tcp_spec->hdr.src_port;
1814                         rule->input.dst_port =
1815                                 tcp_spec->hdr.dst_port;
1816                 }
1817
1818                 item = next_no_fuzzy_pattern(pattern, item);
1819                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1820                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1821                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1822                         rte_flow_error_set(error, EINVAL,
1823                                 RTE_FLOW_ERROR_TYPE_ITEM,
1824                                 item, "Not supported by fdir filter");
1825                         return -rte_errno;
1826                 }
1827         }
1828
1829         /* Get the UDP info */
1830         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1831                 /**
1832                  * Set the flow type even if there's no content
1833                  * as we must have a flow type.
1834                  */
1835                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1836                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1837                 /*Not supported last point for range*/
1838                 if (item->last) {
1839                         rte_flow_error_set(error, EINVAL,
1840                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1841                                 item, "Not supported last point for range");
1842                         return -rte_errno;
1843                 }
1844                 /**
1845                  * Only care about src & dst ports,
1846                  * others should be masked.
1847                  */
1848                 if (!item->mask) {
1849                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1850                         rte_flow_error_set(error, EINVAL,
1851                                 RTE_FLOW_ERROR_TYPE_ITEM,
1852                                 item, "Not supported by fdir filter");
1853                         return -rte_errno;
1854                 }
1855                 rule->b_mask = TRUE;
1856                 udp_mask = item->mask;
1857                 if (udp_mask->hdr.dgram_len ||
1858                     udp_mask->hdr.dgram_cksum) {
1859                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1860                         rte_flow_error_set(error, EINVAL,
1861                                 RTE_FLOW_ERROR_TYPE_ITEM,
1862                                 item, "Not supported by fdir filter");
1863                         return -rte_errno;
1864                 }
1865                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1866                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1867
1868                 if (item->spec) {
1869                         rule->b_spec = TRUE;
1870                         udp_spec = item->spec;
1871                         rule->input.src_port =
1872                                 udp_spec->hdr.src_port;
1873                         rule->input.dst_port =
1874                                 udp_spec->hdr.dst_port;
1875                 }
1876
1877                 item = next_no_fuzzy_pattern(pattern, item);
1878                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1879                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1880                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1881                         rte_flow_error_set(error, EINVAL,
1882                                 RTE_FLOW_ERROR_TYPE_ITEM,
1883                                 item, "Not supported by fdir filter");
1884                         return -rte_errno;
1885                 }
1886         }
1887
1888         /* Get the SCTP info */
1889         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1890                 /**
1891                  * Set the flow type even if there's no content
1892                  * as we must have a flow type.
1893                  */
1894                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1895                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1896                 /*Not supported last point for range*/
1897                 if (item->last) {
1898                         rte_flow_error_set(error, EINVAL,
1899                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1900                                 item, "Not supported last point for range");
1901                         return -rte_errno;
1902                 }
1903
1904                 /**
1905                  * Only care about src & dst ports,
1906                  * others should be masked.
1907                  */
1908                 if (!item->mask) {
1909                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1910                         rte_flow_error_set(error, EINVAL,
1911                                 RTE_FLOW_ERROR_TYPE_ITEM,
1912                                 item, "Not supported by fdir filter");
1913                         return -rte_errno;
1914                 }
1915                 rule->b_mask = TRUE;
1916                 sctp_mask = item->mask;
1917                 if (sctp_mask->hdr.tag ||
1918                         sctp_mask->hdr.cksum) {
1919                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1920                         rte_flow_error_set(error, EINVAL,
1921                                 RTE_FLOW_ERROR_TYPE_ITEM,
1922                                 item, "Not supported by fdir filter");
1923                         return -rte_errno;
1924                 }
1925                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1926                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1927
1928                 if (item->spec) {
1929                         rule->b_spec = TRUE;
1930                         sctp_spec = item->spec;
1931                         rule->input.src_port =
1932                                 sctp_spec->hdr.src_port;
1933                         rule->input.dst_port =
1934                                 sctp_spec->hdr.dst_port;
1935                 }
1949
1950                 item = next_no_fuzzy_pattern(pattern, item);
1951                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1952                         item->type != RTE_FLOW_ITEM_TYPE_END) {
1953                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1954                         rte_flow_error_set(error, EINVAL,
1955                                 RTE_FLOW_ERROR_TYPE_ITEM,
1956                                 item, "Not supported by fdir filter");
1957                         return -rte_errno;
1958                 }
1959         }
1960
1961         /* Get the flex byte info */
1962         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1963                 /* Not supported last point for range*/
1964                 if (item->last) {
1965                         rte_flow_error_set(error, EINVAL,
1966                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1967                                 item, "Not supported last point for range");
1968                         return -rte_errno;
1969                 }
1970                 /* mask should not be null */
1971                 if (!item->mask || !item->spec) {
1972                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1973                         rte_flow_error_set(error, EINVAL,
1974                                 RTE_FLOW_ERROR_TYPE_ITEM,
1975                                 item, "Not supported by fdir filter");
1976                         return -rte_errno;
1977                 }
1978
1979                 raw_mask = item->mask;
1980
1981                 /* check mask */
1982                 if (raw_mask->relative != 0x1 ||
1983                     raw_mask->search != 0x1 ||
1984                     raw_mask->reserved != 0x0 ||
1985                     (uint32_t)raw_mask->offset != 0xffffffff ||
1986                     raw_mask->limit != 0xffff ||
1987                     raw_mask->length != 0xffff) {
1988                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1989                         rte_flow_error_set(error, EINVAL,
1990                                 RTE_FLOW_ERROR_TYPE_ITEM,
1991                                 item, "Not supported by fdir filter");
1992                         return -rte_errno;
1993                 }
1994
1995                 raw_spec = item->spec;
1996
1997                 /* check spec */
1998                 if (raw_spec->relative != 0 ||
1999                     raw_spec->search != 0 ||
2000                     raw_spec->reserved != 0 ||
2001                     raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2002                     raw_spec->offset % 2 ||
2003                     raw_spec->limit != 0 ||
2004                     raw_spec->length != 2 ||
2005                     /* pattern can't be 0xffff */
2006                     (raw_spec->pattern[0] == 0xff &&
2007                      raw_spec->pattern[1] == 0xff)) {
2008                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2009                         rte_flow_error_set(error, EINVAL,
2010                                 RTE_FLOW_ERROR_TYPE_ITEM,
2011                                 item, "Not supported by fdir filter");
2012                         return -rte_errno;
2013                 }
2014
2015                 /* check pattern mask */
2016                 if (raw_mask->pattern[0] != 0xff ||
2017                     raw_mask->pattern[1] != 0xff) {
2018                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2019                         rte_flow_error_set(error, EINVAL,
2020                                 RTE_FLOW_ERROR_TYPE_ITEM,
2021                                 item, "Not supported by fdir filter");
2022                         return -rte_errno;
2023                 }
2024
2025                 rule->mask.flex_bytes_mask = 0xffff;
2026                 rule->input.flex_bytes =
2027                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2028                         raw_spec->pattern[0];
2029                 rule->flex_bytes_offset = raw_spec->offset;
2030         }
2031
2032         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2033                 /* check if the next not void item is END */
2034                 item = next_no_fuzzy_pattern(pattern, item);
2035                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2036                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2037                         rte_flow_error_set(error, EINVAL,
2038                                 RTE_FLOW_ERROR_TYPE_ITEM,
2039                                 item, "Not supported by fdir filter");
2040                         return -rte_errno;
2041                 }
2042         }
2043
2044         rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2045
2046         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2047 }
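/*
 * Illustrative sketch (not driver code): an IPv4/UDP perfect-mode
 * pattern accepted by txgbe_parse_fdir_filter_normal(). Addresses and
 * ports are examples; all other header fields stay zero-masked.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)) } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = RTE_BE32(UINT32_MAX),
 *		.dst_addr = RTE_BE32(UINT32_MAX) } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = RTE_BE16(UINT16_MAX),
 *		.dst_port = RTE_BE16(UINT16_MAX) } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */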
2048
2049 /**
2050  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2051  * and collect the flow director filter info along the way.
2052  * VxLAN PATTERN:
2053  * The first not void item must be ETH.
2054  * The second not void item must be IPV4/ IPV6.
2055  * The third not void item must be UDP, followed by VXLAN and the
2056  * inner MAC VLAN. The next not void item must be END.
2057  * NVGRE PATTERN:
2058  * The first not void item must be ETH.
2059  * The second not void item must be IPV4/ IPV6.
2060  * The third not void item must be NVGRE, followed by the inner
2061  * MAC VLAN. The next not void item must be END.
2062  * ACTION:
2063  * The first not void action should be QUEUE or DROP.
2064  * The second not void optional action should be MARK,
2065  * mark_id is a uint32_t number.
2066  * The next not void action should be END.
2067  * VxLAN pattern example:
2068  * ITEM         Spec                    Mask
2069  * ETH          NULL                    NULL
2070  * IPV4/IPV6    NULL                    NULL
2071  * UDP          NULL                    NULL
2072  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2073  * MAC VLAN     tci     0x2016          0xEFFF
2074  * END
2075  * NVGRE pattern example:
2076  * ITEM         Spec                    Mask
2077  * ETH          NULL                    NULL
2078  * IPV4/IPV6    NULL                    NULL
2079  * NVGRE        protocol        0x6558  0xFFFF
2080  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2081  * MAC VLAN     tci     0x2016          0xEFFF
2082  * END
2083  * other members in mask and spec should be set to 0x00.
2084  * item->last should be NULL.
2085  */
2086 static int
2087 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2088                                const struct rte_flow_item pattern[],
2089                                const struct rte_flow_action actions[],
2090                                struct txgbe_fdir_rule *rule,
2091                                struct rte_flow_error *error)
2092 {
2093         const struct rte_flow_item *item;
2094         const struct rte_flow_item_eth *eth_mask;
2095         uint32_t j;
2096
2097         if (!pattern) {
2098                 rte_flow_error_set(error, EINVAL,
2099                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2100                                    NULL, "NULL pattern.");
2101                 return -rte_errno;
2102         }
2103
2104         if (!actions) {
2105                 rte_flow_error_set(error, EINVAL,
2106                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2107                                    NULL, "NULL action.");
2108                 return -rte_errno;
2109         }
2110
2111         if (!attr) {
2112                 rte_flow_error_set(error, EINVAL,
2113                                    RTE_FLOW_ERROR_TYPE_ATTR,
2114                                    NULL, "NULL attribute.");
2115                 return -rte_errno;
2116         }
2117
2118         /**
2119          * Some fields may not be provided. Set spec to 0 and mask to default
2120          * value, so nothing needs to be done later for the fields not provided.
2121          */
2122         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2123         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2124         rule->mask.vlan_tci_mask = 0;
2125
2126         /**
2127          * The first not void item should be
2128          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2129          */
2130         item = next_no_void_pattern(pattern, NULL);
2131         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2132             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2133             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2134             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2135             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2136             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2137                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2138                 rte_flow_error_set(error, EINVAL,
2139                         RTE_FLOW_ERROR_TYPE_ITEM,
2140                         item, "Not supported by fdir filter");
2141                 return -rte_errno;
2142         }
2143
2144         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2145
2146         /* Skip MAC. */
2147         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2148                 /* Only used to describe the protocol stack. */
2149                 if (item->spec || item->mask) {
2150                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2151                         rte_flow_error_set(error, EINVAL,
2152                                 RTE_FLOW_ERROR_TYPE_ITEM,
2153                                 item, "Not supported by fdir filter");
2154                         return -rte_errno;
2155                 }
2156                 /* Not supported last point for range*/
2157                 if (item->last) {
2158                         rte_flow_error_set(error, EINVAL,
2159                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2160                                 item, "Not supported last point for range");
2161                         return -rte_errno;
2162                 }
2163
2164                 /* Check if the next not void item is IPv4 or IPv6. */
2165                 item = next_no_void_pattern(pattern, item);
2166                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2167                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2168                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2169                         rte_flow_error_set(error, EINVAL,
2170                                 RTE_FLOW_ERROR_TYPE_ITEM,
2171                                 item, "Not supported by fdir filter");
2172                         return -rte_errno;
2173                 }
2174         }
2175
2176         /* Skip IP. */
2177         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2178             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2179                 /* Only used to describe the protocol stack. */
2180                 if (item->spec || item->mask) {
2181                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2182                         rte_flow_error_set(error, EINVAL,
2183                                 RTE_FLOW_ERROR_TYPE_ITEM,
2184                                 item, "Not supported by fdir filter");
2185                         return -rte_errno;
2186                 }
2187                 /*Not supported last point for range*/
2188                 if (item->last) {
2189                         rte_flow_error_set(error, EINVAL,
2190                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2191                                 item, "Not supported last point for range");
2192                         return -rte_errno;
2193                 }
2194
2195                 /* Check if the next not void item is UDP or NVGRE. */
2196                 item = next_no_void_pattern(pattern, item);
2197                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2198                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2199                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2200                         rte_flow_error_set(error, EINVAL,
2201                                 RTE_FLOW_ERROR_TYPE_ITEM,
2202                                 item, "Not supported by fdir filter");
2203                         return -rte_errno;
2204                 }
2205         }
2206
2207         /* Skip UDP. */
2208         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2209                 /* Only used to describe the protocol stack. */
2210                 if (item->spec || item->mask) {
2211                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2212                         rte_flow_error_set(error, EINVAL,
2213                                 RTE_FLOW_ERROR_TYPE_ITEM,
2214                                 item, "Not supported by fdir filter");
2215                         return -rte_errno;
2216                 }
2217                 /*Not supported last point for range*/
2218                 if (item->last) {
2219                         rte_flow_error_set(error, EINVAL,
2220                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2221                                 item, "Not supported last point for range");
2222                         return -rte_errno;
2223                 }
2224
2225                 /* Check if the next not void item is VxLAN. */
2226                 item = next_no_void_pattern(pattern, item);
2227                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2228                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2229                         rte_flow_error_set(error, EINVAL,
2230                                 RTE_FLOW_ERROR_TYPE_ITEM,
2231                                 item, "Not supported by fdir filter");
2232                         return -rte_errno;
2233                 }
2234         }
2235
2236         /* check if the next not void item is MAC */
2237         item = next_no_void_pattern(pattern, item);
2238         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2239                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2240                 rte_flow_error_set(error, EINVAL,
2241                         RTE_FLOW_ERROR_TYPE_ITEM,
2242                         item, "Not supported by fdir filter");
2243                 return -rte_errno;
2244         }
2245
2246         /**
2247          * Only support vlan and dst MAC address,
2248          * others should be masked.
2249          */
2250
2251         if (!item->mask) {
2252                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2253                 rte_flow_error_set(error, EINVAL,
2254                         RTE_FLOW_ERROR_TYPE_ITEM,
2255                         item, "Not supported by fdir filter");
2256                 return -rte_errno;
2257         }
2258         /*Not supported last point for range*/
2259         if (item->last) {
2260                 rte_flow_error_set(error, EINVAL,
2261                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2262                         item, "Not supported last point for range");
2263                 return -rte_errno;
2264         }
2265         rule->b_mask = TRUE;
2266         eth_mask = item->mask;
2267
2268         /* Ether type should be masked. */
2269         if (eth_mask->type) {
2270                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2271                 rte_flow_error_set(error, EINVAL,
2272                         RTE_FLOW_ERROR_TYPE_ITEM,
2273                         item, "Not supported by fdir filter");
2274                 return -rte_errno;
2275         }
2276
2277         /* src MAC address should be masked. */
2278         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2279                 if (eth_mask->src.addr_bytes[j]) {
2280                         memset(rule, 0,
2281                                sizeof(struct txgbe_fdir_rule));
2282                         rte_flow_error_set(error, EINVAL,
2283                                 RTE_FLOW_ERROR_TYPE_ITEM,
2284                                 item, "Not supported by fdir filter");
2285                         return -rte_errno;
2286                 }
2287         }
2288         rule->mask.mac_addr_byte_mask = 0;
2289         for (j = 0; j < ETH_ADDR_LEN; j++) {
2290                 /* It's a per byte mask. */
2291                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2292                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2293                 } else if (eth_mask->dst.addr_bytes[j]) {
2294                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2295                         rte_flow_error_set(error, EINVAL,
2296                                 RTE_FLOW_ERROR_TYPE_ITEM,
2297                                 item, "Not supported by fdir filter");
2298                         return -rte_errno;
2299                 }
2300         }
2301
2302         /* When no vlan, considered as full mask. */
2303         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2304
2305         /**
2306          * Check if the next not void item is vlan or ipv4.
2307          * IPv6 is not supported.
2308          */
2309         item = next_no_void_pattern(pattern, item);
2310         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2311                 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2312                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2313                 rte_flow_error_set(error, EINVAL,
2314                         RTE_FLOW_ERROR_TYPE_ITEM,
2315                         item, "Not supported by fdir filter");
2316                 return -rte_errno;
2317         }
2318         /*Not supported last point for range*/
2319         if (item->last) {
2320                 rte_flow_error_set(error, EINVAL,
2321                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2322                         item, "Not supported last point for range");
2323                 return -rte_errno;
2324         }
2325
2326         /**
2327          * If the VLAN tag is 0, it means we don't care about the VLAN.
2328          * Do nothing.
2329          */
2330
2331         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2332 }
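/*
 * Illustrative item order (not driver code) accepted by the tunnel
 * parser above; outer ETH/IP/UDP items carry no spec/mask and only
 * describe the protocol stack:
 *
 *	ETH -> IPV4/IPV6 -> UDP -> VXLAN -> inner ETH (dst MAC matched
 *	per byte) -> VLAN or IPV4 -> END
 *	ETH -> IPV4/IPV6 -> NVGRE -> inner ETH -> VLAN or IPV4 -> END
 */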
2333
2334 static int
2335 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2336                         const struct rte_flow_attr *attr,
2337                         const struct rte_flow_item pattern[],
2338                         const struct rte_flow_action actions[],
2339                         struct txgbe_fdir_rule *rule,
2340                         struct rte_flow_error *error)
2341 {
2342         int ret;
2343         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2344         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2345
2346         ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2347                                         actions, rule, error);
2348         if (!ret)
2349                 goto step_next;
2350
2351         ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2352                                         actions, rule, error);
2353         if (ret)
2354                 return ret;
2355
2356 step_next:
2357
2358         if (hw->mac.type == txgbe_mac_raptor &&
2359                 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2360                 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2361                 return -ENOTSUP;
2362
2363         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2364             fdir_mode != rule->mode)
2365                 return -ENOTSUP;
2366
2367         if (rule->queue >= dev->data->nb_rx_queues)
2368                 return -ENOTSUP;
2369
2370         return ret;
2371 }
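/*
 * The rule mode checked above must match the mode the port was
 * configured with; a minimal sketch (values illustrative):
 *
 *	struct rte_eth_conf port_conf = {
 *		.fdir_conf = {
 *			.mode = RTE_FDIR_MODE_PERFECT,
 *			.pballoc = RTE_FDIR_PBALLOC_64K,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */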
2372
2373 static int
2374 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2375                         const struct rte_flow_attr *attr,
2376                         const struct rte_flow_action actions[],
2377                         struct txgbe_rte_flow_rss_conf *rss_conf,
2378                         struct rte_flow_error *error)
2379 {
2380         const struct rte_flow_action *act;
2381         const struct rte_flow_action_rss *rss;
2382         uint16_t n;
2383
2384         /**
2385          * RSS only supports forwarding;
2386          * check if the first not void action is RSS.
2387          */
2388         act = next_no_void_action(actions, NULL);
2389         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2390                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2391                 rte_flow_error_set(error, EINVAL,
2392                         RTE_FLOW_ERROR_TYPE_ACTION,
2393                         act, "Not supported action.");
2394                 return -rte_errno;
2395         }
2396
2397         rss = (const struct rte_flow_action_rss *)act->conf;
2398
2399         if (!rss || !rss->queue_num) {
2400                 rte_flow_error_set(error, EINVAL,
2401                                 RTE_FLOW_ERROR_TYPE_ACTION,
2402                                 act, "no valid queues");
2404                 return -rte_errno;
2405         }
2406
2407         for (n = 0; n < rss->queue_num; n++) {
2408                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2409                         rte_flow_error_set(error, EINVAL,
2410                                    RTE_FLOW_ERROR_TYPE_ACTION,
2411                                    act,
2412                                    "queue id > max number of queues");
2413                         return -rte_errno;
2414                 }
2415         }
2416
2417         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2418                 return rte_flow_error_set
2419                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2420                          "non-default RSS hash functions are not supported");
2421         if (rss->level)
2422                 return rte_flow_error_set
2423                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2424                          "a nonzero RSS encapsulation level is not supported");
2425         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2426                 return rte_flow_error_set
2427                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2428                          "RSS hash key must be exactly 40 bytes");
2429         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2430                 return rte_flow_error_set
2431                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2432                          "too many queues for RSS context");
2433         if (txgbe_rss_conf_init(rss_conf, rss))
2434                 return rte_flow_error_set
2435                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2436                          "RSS context initialization failure");
2437
2438         /* check if the next not void action is END */
2439         act = next_no_void_action(actions, act);
2440         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2441                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2442                 rte_flow_error_set(error, EINVAL,
2443                         RTE_FLOW_ERROR_TYPE_ACTION,
2444                         act, "Not supported action.");
2445                 return -rte_errno;
2446         }
2447
2448         /* parse attr */
2449         /* must be input direction */
2450         if (!attr->ingress) {
2451                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2452                 rte_flow_error_set(error, EINVAL,
2453                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2454                                    attr, "Only support ingress.");
2455                 return -rte_errno;
2456         }
2457
2458         /* not supported */
2459         if (attr->egress) {
2460                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2461                 rte_flow_error_set(error, EINVAL,
2462                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2463                                    attr, "Not support egress.");
2464                 return -rte_errno;
2465         }
2466
2467         /* not supported */
2468         if (attr->transfer) {
2469                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2470                 rte_flow_error_set(error, EINVAL,
2471                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2472                                    attr, "No support for transfer.");
2473                 return -rte_errno;
2474         }
2475
2476         if (attr->priority > 0xFFFF) {
2477                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2478                 rte_flow_error_set(error, EINVAL,
2479                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2480                                    attr, "Error priority.");
2481                 return -rte_errno;
2482         }
2483
2484         return 0;
2485 }
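/*
 * Illustrative RSS action (not driver code) that passes the checks
 * above: default hash function, level 0, queues below nb_rx_queues.
 *
 *	uint16_t queues[] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP | ETH_RSS_UDP,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 */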
2486
2487 /**
2488  * Create or destroy a flow rule.
2489  * Theoretically one rule can match more than one filter.
2490  * We let it use the first filter it hits,
2491  * so the sequence matters.
2492  */
2493 static struct rte_flow *
2494 txgbe_flow_create(struct rte_eth_dev *dev,
2495                   const struct rte_flow_attr *attr,
2496                   const struct rte_flow_item pattern[],
2497                   const struct rte_flow_action actions[],
2498                   struct rte_flow_error *error)
2499 {
2500         struct rte_flow *flow = NULL;
2501         return flow;
2502 }
2503
2504 /**
2505  * Check if the flow rule is supported by txgbe.
2506  * It only checks the format; it does not guarantee that the rule can be
2507  * programmed into the HW, because there may not be enough room for it.
2508  */
2509 static int
2510 txgbe_flow_validate(struct rte_eth_dev *dev,
2511                 const struct rte_flow_attr *attr,
2512                 const struct rte_flow_item pattern[],
2513                 const struct rte_flow_action actions[],
2514                 struct rte_flow_error *error)
2515 {
2516         struct rte_eth_ntuple_filter ntuple_filter;
2517         struct rte_eth_ethertype_filter ethertype_filter;
2518         struct rte_eth_syn_filter syn_filter;
2519         struct txgbe_l2_tunnel_conf l2_tn_filter;
2520         struct txgbe_fdir_rule fdir_rule;
2521         struct txgbe_rte_flow_rss_conf rss_conf;
2522         int ret = 0;
2523
2524         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2525         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2526                                 actions, &ntuple_filter, error);
2527         if (!ret)
2528                 return 0;
2529
2530         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2531         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2532                                 actions, &ethertype_filter, error);
2533         if (!ret)
2534                 return 0;
2535
2536         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2537         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2538                                 actions, &syn_filter, error);
2539         if (!ret)
2540                 return 0;
2541
2542         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2543         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2544                                 actions, &fdir_rule, error);
2545         if (!ret)
2546                 return 0;
2547
2548         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2549         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2550                                 actions, &l2_tn_filter, error);
2551         if (!ret)
2552                 return 0;
2553
2554         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2555         ret = txgbe_parse_rss_filter(dev, attr,
2556                                         actions, &rss_conf, error);
2557
2558         return ret;
2559 }
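/*
 * Typical application usage (illustrative) reaching these ops through
 * the generic rte_flow API:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */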
2560
2561 /* Destroy a flow rule on txgbe. */
2562 static int
2563 txgbe_flow_destroy(struct rte_eth_dev *dev,
2564                 struct rte_flow *flow,
2565                 struct rte_flow_error *error)
2566 {
2567         int ret = 0;
2568
2569         return ret;
2570 }
2571
2572 /*  Destroy all flow rules associated with a port on txgbe. */
2573 static int
2574 txgbe_flow_flush(struct rte_eth_dev *dev,
2575                 struct rte_flow_error *error)
2576 {
2577         int ret = 0;
2578
2579         return ret;
2580 }
2581
2582 const struct rte_flow_ops txgbe_flow_ops = {
2583         .validate = txgbe_flow_validate,
2584         .create = txgbe_flow_create,
2585         .destroy = txgbe_flow_destroy,
2586         .flush = txgbe_flow_flush,
2587 };
2588