dpdk.git: drivers/net/txgbe/txgbe_flow.c (203a722d7014e96fbb9a4fa94904519988ffa919)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <sys/queue.h>
6 #include <rte_bus_pci.h>
7 #include <rte_flow.h>
8 #include <rte_flow_driver.h>
9
10 #include "txgbe_ethdev.h"
11
12 #define TXGBE_MIN_N_TUPLE_PRIO 1
13 #define TXGBE_MAX_N_TUPLE_PRIO 7
14
15 /**
16  * An endless loop cannot happen given the following assumptions:
17  * 1. there is at least one non-void item (END)
18  * 2. cur is before END.
19  */
20 static inline
21 const struct rte_flow_item *next_no_void_pattern(
22                 const struct rte_flow_item pattern[],
23                 const struct rte_flow_item *cur)
24 {
25         const struct rte_flow_item *next =
26                 cur ? cur + 1 : &pattern[0];
27         while (1) {
28                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
29                         return next;
30                 next++;
31         }
32 }
33
34 static inline
35 const struct rte_flow_action *next_no_void_action(
36                 const struct rte_flow_action actions[],
37                 const struct rte_flow_action *cur)
38 {
39         const struct rte_flow_action *next =
40                 cur ? cur + 1 : &actions[0];
41         while (1) {
42                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
43                         return next;
44                 next++;
45         }
46 }
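/*
 * Illustrative sketch (not part of the driver): the parsers below walk a
 * pattern by starting from a NULL cursor and advancing until END, e.g.:
 *
 *     const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *
 *     while (it->type != RTE_FLOW_ITEM_TYPE_END)
 *             it = next_no_void_pattern(pattern, it);
 */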
47
48 /**
49  * Please be aware of an assumption shared by all the parsers:
50  * rte_flow_item values use big-endian byte order, while rte_flow_attr
51  * and rte_flow_action use CPU byte order.
52  * This is because the pattern describes packets, and packets
53  * normally use network (big-endian) byte order.
54  */
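/*
 * Illustrative sketch (not part of the driver): an application filling a
 * pattern spec therefore converts values to network order, while the
 * attributes stay in CPU order, e.g.:
 *
 *     struct rte_flow_item_ipv4 ip_spec = { 0 };
 *     struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *
 *     ip_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
 *     ip_spec.hdr.next_proto_id = IPPROTO_UDP;
 */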
55
56 /**
57  * Parse the rule to see if it is an n-tuple rule,
58  * and fill in the n-tuple filter info along the way.
59  * pattern:
60  * The first not void item can be ETH or IPV4.
61  * The second not void item must be IPV4 if the first one is ETH.
62  * The third not void item must be UDP, TCP or SCTP.
63  * The next not void item must be END.
64  * action:
65  * The first not void action should be QUEUE.
66  * The next not void action should be END.
67  * pattern example:
68  * ITEM         Spec                    Mask
69  * ETH          NULL                    NULL
70  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
71  *              dst_addr 192.167.3.50   0xFFFFFFFF
72  *              next_proto_id   17      0xFF
73  * UDP/TCP/     src_port        80      0xFFFF
74  * SCTP         dst_port        80      0xFFFF
75  * END
76  * Other members in mask and spec should be set to 0x00.
77  * item->last should be NULL.
78  */
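/*
 * Illustrative sketch (not part of the driver): the example pattern above
 * might be built by an application as follows (queue index 1 is arbitrary):
 *
 *     struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *     struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *
 *     ip_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
 *     ip_mask.hdr.src_addr = UINT32_MAX;
 *     udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *     udp_mask.hdr.dst_port = UINT16_MAX;
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */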
79 static int
80 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
81                          const struct rte_flow_item pattern[],
82                          const struct rte_flow_action actions[],
83                          struct rte_eth_ntuple_filter *filter,
84                          struct rte_flow_error *error)
85 {
86         const struct rte_flow_item *item;
87         const struct rte_flow_action *act;
88         const struct rte_flow_item_ipv4 *ipv4_spec;
89         const struct rte_flow_item_ipv4 *ipv4_mask;
90         const struct rte_flow_item_tcp *tcp_spec;
91         const struct rte_flow_item_tcp *tcp_mask;
92         const struct rte_flow_item_udp *udp_spec;
93         const struct rte_flow_item_udp *udp_mask;
94         const struct rte_flow_item_sctp *sctp_spec;
95         const struct rte_flow_item_sctp *sctp_mask;
96         const struct rte_flow_item_eth *eth_spec;
97         const struct rte_flow_item_eth *eth_mask;
98         const struct rte_flow_item_vlan *vlan_spec;
99         const struct rte_flow_item_vlan *vlan_mask;
100         struct rte_flow_item_eth eth_null;
101         struct rte_flow_item_vlan vlan_null;
102
103         if (!pattern) {
104                 rte_flow_error_set(error,
105                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
106                         NULL, "NULL pattern.");
107                 return -rte_errno;
108         }
109
110         if (!actions) {
111                 rte_flow_error_set(error, EINVAL,
112                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
113                                    NULL, "NULL action.");
114                 return -rte_errno;
115         }
116         if (!attr) {
117                 rte_flow_error_set(error, EINVAL,
118                                    RTE_FLOW_ERROR_TYPE_ATTR,
119                                    NULL, "NULL attribute.");
120                 return -rte_errno;
121         }
122
123         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
124         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
125
126         /* the first not void item can be MAC or IPv4 */
127         item = next_no_void_pattern(pattern, NULL);
128
129         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
130             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
131                 rte_flow_error_set(error, EINVAL,
132                         RTE_FLOW_ERROR_TYPE_ITEM,
133                         item, "Not supported by ntuple filter");
134                 return -rte_errno;
135         }
136         /* Skip Ethernet */
137         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
138                 eth_spec = item->spec;
139                 eth_mask = item->mask;
140                 /*Not supported last point for range*/
141                 if (item->last) {
142                         rte_flow_error_set(error,
143                           EINVAL,
144                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
145                           item, "Not supported last point for range");
146                         return -rte_errno;
147                 }
148                 /* if the first item is MAC, the content should be NULL */
149                 if ((item->spec || item->mask) &&
150                         (memcmp(eth_spec, &eth_null,
151                                 sizeof(struct rte_flow_item_eth)) ||
152                          memcmp(eth_mask, &eth_null,
153                                 sizeof(struct rte_flow_item_eth)))) {
154                         rte_flow_error_set(error, EINVAL,
155                                 RTE_FLOW_ERROR_TYPE_ITEM,
156                                 item, "Not supported by ntuple filter");
157                         return -rte_errno;
158                 }
159                 /* check if the next not void item is IPv4 or Vlan */
160                 item = next_no_void_pattern(pattern, item);
161                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
162                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
163                         rte_flow_error_set(error,
164                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
165                                 item, "Not supported by ntuple filter");
166                         return -rte_errno;
167                 }
168         }
169
170         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
171                 vlan_spec = item->spec;
172                 vlan_mask = item->mask;
173                 /*Not supported last point for range*/
174                 if (item->last) {
175                         rte_flow_error_set(error,
176                                 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
177                                 item, "Not supported last point for range");
178                         return -rte_errno;
179                 }
180                 /* the content should be NULL */
181                 if ((item->spec || item->mask) &&
182                         (memcmp(vlan_spec, &vlan_null,
183                                 sizeof(struct rte_flow_item_vlan)) ||
184                          memcmp(vlan_mask, &vlan_null,
185                                 sizeof(struct rte_flow_item_vlan)))) {
186                         rte_flow_error_set(error, EINVAL,
187                                 RTE_FLOW_ERROR_TYPE_ITEM,
188                                 item, "Not supported by ntuple filter");
189                         return -rte_errno;
190                 }
191                 /* check if the next not void item is IPv4 */
192                 item = next_no_void_pattern(pattern, item);
193                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
194                         rte_flow_error_set(error,
195                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
196                           item, "Not supported by ntuple filter");
197                         return -rte_errno;
198                 }
199         }
200
201         if (item->mask) {
202                 /* get the IPv4 info */
203                 if (!item->spec || !item->mask) {
204                         rte_flow_error_set(error, EINVAL,
205                                 RTE_FLOW_ERROR_TYPE_ITEM,
206                                 item, "Invalid ntuple mask");
207                         return -rte_errno;
208                 }
209                 /*Not supported last point for range*/
210                 if (item->last) {
211                         rte_flow_error_set(error, EINVAL,
212                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
213                                 item, "Not supported last point for range");
214                         return -rte_errno;
215                 }
216
217                 ipv4_mask = item->mask;
218                 /**
219                  * Only support src & dst addresses, protocol,
220                  * others should be masked.
221                  */
222                 if (ipv4_mask->hdr.version_ihl ||
223                     ipv4_mask->hdr.type_of_service ||
224                     ipv4_mask->hdr.total_length ||
225                     ipv4_mask->hdr.packet_id ||
226                     ipv4_mask->hdr.fragment_offset ||
227                     ipv4_mask->hdr.time_to_live ||
228                     ipv4_mask->hdr.hdr_checksum) {
229                         rte_flow_error_set(error,
230                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
231                                 item, "Not supported by ntuple filter");
232                         return -rte_errno;
233                 }
234                 if ((ipv4_mask->hdr.src_addr != 0 &&
235                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
236                         (ipv4_mask->hdr.dst_addr != 0 &&
237                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
238                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
239                         ipv4_mask->hdr.next_proto_id != 0)) {
240                         rte_flow_error_set(error,
241                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
242                                 item, "Not supported by ntuple filter");
243                         return -rte_errno;
244                 }
245
246                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
247                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
248                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
249
250                 ipv4_spec = item->spec;
251                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
252                 filter->src_ip = ipv4_spec->hdr.src_addr;
253                 filter->proto  = ipv4_spec->hdr.next_proto_id;
254         }
255
256         /* check if the next not void item is TCP or UDP */
257         item = next_no_void_pattern(pattern, item);
258         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
259             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
260             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
261             item->type != RTE_FLOW_ITEM_TYPE_END) {
262                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
263                 rte_flow_error_set(error, EINVAL,
264                         RTE_FLOW_ERROR_TYPE_ITEM,
265                         item, "Not supported by ntuple filter");
266                 return -rte_errno;
267         }
268
269         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
270                 (!item->spec && !item->mask)) {
271                 goto action;
272         }
273
274         /* get the TCP/UDP/SCTP info */
275         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
276                 (!item->spec || !item->mask)) {
277                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
278                 rte_flow_error_set(error, EINVAL,
279                         RTE_FLOW_ERROR_TYPE_ITEM,
280                         item, "Invalid ntuple mask");
281                 return -rte_errno;
282         }
283
284         /*Not supported last point for range*/
285         if (item->last) {
286                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
287                 rte_flow_error_set(error, EINVAL,
288                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
289                         item, "Not supported last point for range");
290                 return -rte_errno;
291         }
292
293         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
294                 tcp_mask = item->mask;
295
296                 /**
297                  * Only support src & dst ports, tcp flags,
298                  * others should be masked.
299                  */
300                 if (tcp_mask->hdr.sent_seq ||
301                     tcp_mask->hdr.recv_ack ||
302                     tcp_mask->hdr.data_off ||
303                     tcp_mask->hdr.rx_win ||
304                     tcp_mask->hdr.cksum ||
305                     tcp_mask->hdr.tcp_urp) {
306                         memset(filter, 0,
307                                 sizeof(struct rte_eth_ntuple_filter));
308                         rte_flow_error_set(error, EINVAL,
309                                 RTE_FLOW_ERROR_TYPE_ITEM,
310                                 item, "Not supported by ntuple filter");
311                         return -rte_errno;
312                 }
313                 if ((tcp_mask->hdr.src_port != 0 &&
314                         tcp_mask->hdr.src_port != UINT16_MAX) ||
315                         (tcp_mask->hdr.dst_port != 0 &&
316                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
317                         rte_flow_error_set(error,
318                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
319                                 item, "Not supported by ntuple filter");
320                         return -rte_errno;
321                 }
322
323                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
324                 filter->src_port_mask  = tcp_mask->hdr.src_port;
325                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
326                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
327                 } else if (!tcp_mask->hdr.tcp_flags) {
328                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
329                 } else {
330                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
331                         rte_flow_error_set(error, EINVAL,
332                                 RTE_FLOW_ERROR_TYPE_ITEM,
333                                 item, "Not supported by ntuple filter");
334                         return -rte_errno;
335                 }
336
337                 tcp_spec = item->spec;
338                 filter->dst_port  = tcp_spec->hdr.dst_port;
339                 filter->src_port  = tcp_spec->hdr.src_port;
340                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
341         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
342                 udp_mask = item->mask;
343
344                 /**
345                  * Only support src & dst ports,
346                  * others should be masked.
347                  */
348                 if (udp_mask->hdr.dgram_len ||
349                     udp_mask->hdr.dgram_cksum) {
350                         memset(filter, 0,
351                                 sizeof(struct rte_eth_ntuple_filter));
352                         rte_flow_error_set(error, EINVAL,
353                                 RTE_FLOW_ERROR_TYPE_ITEM,
354                                 item, "Not supported by ntuple filter");
355                         return -rte_errno;
356                 }
357                 if ((udp_mask->hdr.src_port != 0 &&
358                         udp_mask->hdr.src_port != UINT16_MAX) ||
359                         (udp_mask->hdr.dst_port != 0 &&
360                         udp_mask->hdr.dst_port != UINT16_MAX)) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366
367                 filter->dst_port_mask = udp_mask->hdr.dst_port;
368                 filter->src_port_mask = udp_mask->hdr.src_port;
369
370                 udp_spec = item->spec;
371                 filter->dst_port = udp_spec->hdr.dst_port;
372                 filter->src_port = udp_spec->hdr.src_port;
373         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
374                 sctp_mask = item->mask;
375
376                 /**
377                  * Only support src & dst ports,
378                  * others should be masked.
379                  */
380                 if (sctp_mask->hdr.tag ||
381                     sctp_mask->hdr.cksum) {
382                         memset(filter, 0,
383                                 sizeof(struct rte_eth_ntuple_filter));
384                         rte_flow_error_set(error, EINVAL,
385                                 RTE_FLOW_ERROR_TYPE_ITEM,
386                                 item, "Not supported by ntuple filter");
387                         return -rte_errno;
388                 }
389
390                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
391                 filter->src_port_mask = sctp_mask->hdr.src_port;
392
393                 sctp_spec = item->spec;
394                 filter->dst_port = sctp_spec->hdr.dst_port;
395                 filter->src_port = sctp_spec->hdr.src_port;
396         } else {
397                 goto action;
398         }
399
400         /* check if the next not void item is END */
401         item = next_no_void_pattern(pattern, item);
402         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
403                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
404                 rte_flow_error_set(error, EINVAL,
405                         RTE_FLOW_ERROR_TYPE_ITEM,
406                         item, "Not supported by ntuple filter");
407                 return -rte_errno;
408         }
409
410 action:
411
412         /**
413          * n-tuple only supports forwarding,
414          * check if the first not void action is QUEUE.
415          */
416         act = next_no_void_action(actions, NULL);
417         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
418                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
419                 rte_flow_error_set(error, EINVAL,
420                         RTE_FLOW_ERROR_TYPE_ACTION,
421                         item, "Not supported action.");
422                 return -rte_errno;
423         }
424         filter->queue =
425                 ((const struct rte_flow_action_queue *)act->conf)->index;
426
427         /* check if the next not void item is END */
428         act = next_no_void_action(actions, act);
429         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
430                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431                 rte_flow_error_set(error, EINVAL,
432                         RTE_FLOW_ERROR_TYPE_ACTION,
433                         act, "Not supported action.");
434                 return -rte_errno;
435         }
436
437         /* parse attr */
438         /* must be input direction */
439         if (!attr->ingress) {
440                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
441                 rte_flow_error_set(error, EINVAL,
442                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
443                                    attr, "Only support ingress.");
444                 return -rte_errno;
445         }
446
447         /* not supported */
448         if (attr->egress) {
449                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
450                 rte_flow_error_set(error, EINVAL,
451                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
452                                    attr, "Not support egress.");
453                 return -rte_errno;
454         }
455
456         /* not supported */
457         if (attr->transfer) {
458                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
459                 rte_flow_error_set(error, EINVAL,
460                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
461                                    attr, "No support for transfer.");
462                 return -rte_errno;
463         }
464
465         if (attr->priority > 0xFFFF) {
466                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
467                 rte_flow_error_set(error, EINVAL,
468                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
469                                    attr, "Error priority.");
470                 return -rte_errno;
471         }
472         filter->priority = (uint16_t)attr->priority;
473         if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
474                 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
475                 filter->priority = 1;
476
477         return 0;
478 }
479
480 /* a txgbe-specific parser, because the supported filter flags are device specific */
481 static int
482 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
483                           const struct rte_flow_attr *attr,
484                           const struct rte_flow_item pattern[],
485                           const struct rte_flow_action actions[],
486                           struct rte_eth_ntuple_filter *filter,
487                           struct rte_flow_error *error)
488 {
489         int ret;
490
491         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
492
493         if (ret)
494                 return ret;
495
496         /* txgbe doesn't support tcp flags */
497         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
498                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
499                 rte_flow_error_set(error, EINVAL,
500                                    RTE_FLOW_ERROR_TYPE_ITEM,
501                                    NULL, "Not supported by ntuple filter");
502                 return -rte_errno;
503         }
504
505         /* txgbe doesn't support many priorities */
506         if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
507             filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
508                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
509                 rte_flow_error_set(error, EINVAL,
510                         RTE_FLOW_ERROR_TYPE_ITEM,
511                         NULL, "Priority not supported by ntuple filter");
512                 return -rte_errno;
513         }
514
515         if (filter->queue >= dev->data->nb_rx_queues) {
516                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
517                 rte_flow_error_set(error, EINVAL,
518                                    RTE_FLOW_ERROR_TYPE_ITEM,
519                                    NULL, "Not supported by ntuple filter");
520                 return -rte_errno;
521         }
522
523         /* fixed value for txgbe */
524         filter->flags = RTE_5TUPLE_FLAGS;
525         return 0;
526 }
527
528 /**
529  * Parse the rule to see if it is an ethertype rule,
530  * and fill in the ethertype filter info along the way.
531  * pattern:
532  * The first not void item must be ETH.
533  * The next not void item must be END.
534  * action:
535  * The first not void action should be QUEUE.
536  * The next not void action should be END.
537  * pattern example:
538  * ITEM         Spec                    Mask
539  * ETH          type    0x0807          0xFFFF
540  * END
541  * Other members in mask and spec should be set to 0x00.
542  * item->last should be NULL.
543  */
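/*
 * Illustrative sketch (not part of the driver): the ethertype example above
 * might be built as (queue index 1 is arbitrary):
 *
 *     struct rte_flow_item_eth eth_spec = { 0 }, eth_mask = { 0 };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *
 *     eth_spec.type = rte_cpu_to_be_16(0x0807);
 *     eth_mask.type = UINT16_MAX;
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *               .spec = &eth_spec, .mask = &eth_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */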
544 static int
545 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
546                             const struct rte_flow_item *pattern,
547                             const struct rte_flow_action *actions,
548                             struct rte_eth_ethertype_filter *filter,
549                             struct rte_flow_error *error)
550 {
551         const struct rte_flow_item *item;
552         const struct rte_flow_action *act;
553         const struct rte_flow_item_eth *eth_spec;
554         const struct rte_flow_item_eth *eth_mask;
555         const struct rte_flow_action_queue *act_q;
556
557         if (!pattern) {
558                 rte_flow_error_set(error, EINVAL,
559                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
560                                 NULL, "NULL pattern.");
561                 return -rte_errno;
562         }
563
564         if (!actions) {
565                 rte_flow_error_set(error, EINVAL,
566                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
567                                 NULL, "NULL action.");
568                 return -rte_errno;
569         }
570
571         if (!attr) {
572                 rte_flow_error_set(error, EINVAL,
573                                    RTE_FLOW_ERROR_TYPE_ATTR,
574                                    NULL, "NULL attribute.");
575                 return -rte_errno;
576         }
577
578         item = next_no_void_pattern(pattern, NULL);
579         /* The first non-void item should be MAC. */
580         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
581                 rte_flow_error_set(error, EINVAL,
582                         RTE_FLOW_ERROR_TYPE_ITEM,
583                         item, "Not supported by ethertype filter");
584                 return -rte_errno;
585         }
586
587         /*Not supported last point for range*/
588         if (item->last) {
589                 rte_flow_error_set(error, EINVAL,
590                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
591                         item, "Not supported last point for range");
592                 return -rte_errno;
593         }
594
595         /* Get the MAC info. */
596         if (!item->spec || !item->mask) {
597                 rte_flow_error_set(error, EINVAL,
598                                 RTE_FLOW_ERROR_TYPE_ITEM,
599                                 item, "Not supported by ethertype filter");
600                 return -rte_errno;
601         }
602
603         eth_spec = item->spec;
604         eth_mask = item->mask;
605
606         /* Mask bits of source MAC address must be full of 0.
607          * Mask bits of destination MAC address must be full
608          * of 1 or full of 0.
609          */
610         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
611             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
612              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
613                 rte_flow_error_set(error, EINVAL,
614                                 RTE_FLOW_ERROR_TYPE_ITEM,
615                                 item, "Invalid ether address mask");
616                 return -rte_errno;
617         }
618
619         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
620                 rte_flow_error_set(error, EINVAL,
621                                 RTE_FLOW_ERROR_TYPE_ITEM,
622                                 item, "Invalid ethertype mask");
623                 return -rte_errno;
624         }
625
626         /* If mask bits of destination MAC address
627          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
628          */
629         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
630                 filter->mac_addr = eth_spec->dst;
631                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
632         } else {
633                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
634         }
635         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
636
637         /* Check if the next non-void item is END. */
638         item = next_no_void_pattern(pattern, item);
639         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
640                 rte_flow_error_set(error, EINVAL,
641                                 RTE_FLOW_ERROR_TYPE_ITEM,
642                                 item, "Not supported by ethertype filter.");
643                 return -rte_errno;
644         }
645
646         /* Parse action */
647
648         act = next_no_void_action(actions, NULL);
649         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
650             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
651                 rte_flow_error_set(error, EINVAL,
652                                 RTE_FLOW_ERROR_TYPE_ACTION,
653                                 act, "Not supported action.");
654                 return -rte_errno;
655         }
656
657         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
658                 act_q = (const struct rte_flow_action_queue *)act->conf;
659                 filter->queue = act_q->index;
660         } else {
661                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
662         }
663
664         /* Check if the next non-void item is END */
665         act = next_no_void_action(actions, act);
666         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
667                 rte_flow_error_set(error, EINVAL,
668                                 RTE_FLOW_ERROR_TYPE_ACTION,
669                                 act, "Not supported action.");
670                 return -rte_errno;
671         }
672
673         /* Parse attr */
674         /* Must be input direction */
675         if (!attr->ingress) {
676                 rte_flow_error_set(error, EINVAL,
677                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
678                                 attr, "Only support ingress.");
679                 return -rte_errno;
680         }
681
682         /* Not supported */
683         if (attr->egress) {
684                 rte_flow_error_set(error, EINVAL,
685                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
686                                 attr, "Not support egress.");
687                 return -rte_errno;
688         }
689
690         /* Not supported */
691         if (attr->transfer) {
692                 rte_flow_error_set(error, EINVAL,
693                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
694                                 attr, "No support for transfer.");
695                 return -rte_errno;
696         }
697
698         /* Not supported */
699         if (attr->priority) {
700                 rte_flow_error_set(error, EINVAL,
701                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
702                                 attr, "Not support priority.");
703                 return -rte_errno;
704         }
705
706         /* Not supported */
707         if (attr->group) {
708                 rte_flow_error_set(error, EINVAL,
709                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
710                                 attr, "Not support group.");
711                 return -rte_errno;
712         }
713
714         return 0;
715 }
716
717 static int
718 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
719                              const struct rte_flow_attr *attr,
720                              const struct rte_flow_item pattern[],
721                              const struct rte_flow_action actions[],
722                              struct rte_eth_ethertype_filter *filter,
723                              struct rte_flow_error *error)
724 {
725         int ret;
726
727         ret = cons_parse_ethertype_filter(attr, pattern,
728                                         actions, filter, error);
729
730         if (ret)
731                 return ret;
732
733         if (filter->queue >= dev->data->nb_rx_queues) {
734                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
735                 rte_flow_error_set(error, EINVAL,
736                         RTE_FLOW_ERROR_TYPE_ITEM,
737                         NULL, "queue index much too big");
738                 return -rte_errno;
739         }
740
741         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
742                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
743                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
744                 rte_flow_error_set(error, EINVAL,
745                         RTE_FLOW_ERROR_TYPE_ITEM,
746                         NULL, "IPv4/IPv6 not supported by ethertype filter");
747                 return -rte_errno;
748         }
749
750         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
751                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
752                 rte_flow_error_set(error, EINVAL,
753                         RTE_FLOW_ERROR_TYPE_ITEM,
754                         NULL, "mac compare is unsupported");
755                 return -rte_errno;
756         }
757
758         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
759                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
760                 rte_flow_error_set(error, EINVAL,
761                         RTE_FLOW_ERROR_TYPE_ITEM,
762                         NULL, "drop option is unsupported");
763                 return -rte_errno;
764         }
765
766         return 0;
767 }
768
769 /**
770  * Parse the rule to see if it is a TCP SYN rule,
771  * and fill in the TCP SYN filter info along the way.
772  * pattern:
773  * The first not void item must be ETH.
774  * The second not void item must be IPV4 or IPV6.
775  * The third not void item must be TCP.
776  * The next not void item must be END.
777  * action:
778  * The first not void action should be QUEUE.
779  * The next not void action should be END.
780  * pattern example:
781  * ITEM         Spec                    Mask
782  * ETH          NULL                    NULL
783  * IPV4/IPV6    NULL                    NULL
784  * TCP          tcp_flags       0x02    0xFF
785  * END
786  * Other members in mask and spec should be set to 0x00.
787  * item->last should be NULL.
788  */
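/*
 * Illustrative sketch (not part of the driver): the TCP SYN pattern above
 * could be expressed as shown below. Note that the parser in this file
 * requires tcp_flags to be masked with exactly RTE_TCP_SYN_FLAG:
 *
 *     struct rte_flow_item_tcp tcp_spec = { 0 }, tcp_mask = { 0 };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *
 *     tcp_spec.hdr.tcp_flags = RTE_TCP_SYN_FLAG;
 *     tcp_mask.hdr.tcp_flags = RTE_TCP_SYN_FLAG;
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *               .spec = &tcp_spec, .mask = &tcp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */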
789 static int
790 cons_parse_syn_filter(const struct rte_flow_attr *attr,
791                                 const struct rte_flow_item pattern[],
792                                 const struct rte_flow_action actions[],
793                                 struct rte_eth_syn_filter *filter,
794                                 struct rte_flow_error *error)
795 {
796         const struct rte_flow_item *item;
797         const struct rte_flow_action *act;
798         const struct rte_flow_item_tcp *tcp_spec;
799         const struct rte_flow_item_tcp *tcp_mask;
800         const struct rte_flow_action_queue *act_q;
801
802         if (!pattern) {
803                 rte_flow_error_set(error, EINVAL,
804                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
805                                 NULL, "NULL pattern.");
806                 return -rte_errno;
807         }
808
809         if (!actions) {
810                 rte_flow_error_set(error, EINVAL,
811                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
812                                 NULL, "NULL action.");
813                 return -rte_errno;
814         }
815
816         if (!attr) {
817                 rte_flow_error_set(error, EINVAL,
818                                    RTE_FLOW_ERROR_TYPE_ATTR,
819                                    NULL, "NULL attribute.");
820                 return -rte_errno;
821         }
822
823
824         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
825         item = next_no_void_pattern(pattern, NULL);
826         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
827             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
828             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
829             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
830                 rte_flow_error_set(error, EINVAL,
831                                 RTE_FLOW_ERROR_TYPE_ITEM,
832                                 item, "Not supported by syn filter");
833                 return -rte_errno;
834         }
835         /*Not supported last point for range*/
836         if (item->last) {
837                 rte_flow_error_set(error, EINVAL,
838                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
839                         item, "Not supported last point for range");
840                 return -rte_errno;
841         }
842
843         /* Skip Ethernet */
844         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
845                 /* if the item is MAC, the content should be NULL */
846                 if (item->spec || item->mask) {
847                         rte_flow_error_set(error, EINVAL,
848                                 RTE_FLOW_ERROR_TYPE_ITEM,
849                                 item, "Invalid SYN address mask");
850                         return -rte_errno;
851                 }
852
853                 /* check if the next not void item is IPv4 or IPv6 */
854                 item = next_no_void_pattern(pattern, item);
855                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
856                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
857                         rte_flow_error_set(error, EINVAL,
858                                 RTE_FLOW_ERROR_TYPE_ITEM,
859                                 item, "Not supported by syn filter");
860                         return -rte_errno;
861                 }
862         }
863
864         /* Skip IP */
865         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
866             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
867                 /* if the item is IP, the content should be NULL */
868                 if (item->spec || item->mask) {
869                         rte_flow_error_set(error, EINVAL,
870                                 RTE_FLOW_ERROR_TYPE_ITEM,
871                                 item, "Invalid SYN mask");
872                         return -rte_errno;
873                 }
874
875                 /* check if the next not void item is TCP */
876                 item = next_no_void_pattern(pattern, item);
877                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
878                         rte_flow_error_set(error, EINVAL,
879                                 RTE_FLOW_ERROR_TYPE_ITEM,
880                                 item, "Not supported by syn filter");
881                         return -rte_errno;
882                 }
883         }
884
885         /* Get the TCP info. Only support SYN. */
886         if (!item->spec || !item->mask) {
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Invalid SYN mask");
890                 return -rte_errno;
891         }
892         /*Not supported last point for range*/
893         if (item->last) {
894                 rte_flow_error_set(error, EINVAL,
895                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
896                         item, "Not supported last point for range");
897                 return -rte_errno;
898         }
899
900         tcp_spec = item->spec;
901         tcp_mask = item->mask;
902         if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
903             tcp_mask->hdr.src_port ||
904             tcp_mask->hdr.dst_port ||
905             tcp_mask->hdr.sent_seq ||
906             tcp_mask->hdr.recv_ack ||
907             tcp_mask->hdr.data_off ||
908             tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
909             tcp_mask->hdr.rx_win ||
910             tcp_mask->hdr.cksum ||
911             tcp_mask->hdr.tcp_urp) {
912                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ITEM,
915                                 item, "Not supported by syn filter");
916                 return -rte_errno;
917         }
918
919         /* check if the next not void item is END */
920         item = next_no_void_pattern(pattern, item);
921         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
922                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
923                 rte_flow_error_set(error, EINVAL,
924                                 RTE_FLOW_ERROR_TYPE_ITEM,
925                                 item, "Not supported by syn filter");
926                 return -rte_errno;
927         }
928
929         /* check if the first not void action is QUEUE. */
930         act = next_no_void_action(actions, NULL);
931         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
932                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
933                 rte_flow_error_set(error, EINVAL,
934                                 RTE_FLOW_ERROR_TYPE_ACTION,
935                                 act, "Not supported action.");
936                 return -rte_errno;
937         }
938
939         act_q = (const struct rte_flow_action_queue *)act->conf;
940         filter->queue = act_q->index;
941         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
942                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
943                 rte_flow_error_set(error, EINVAL,
944                                 RTE_FLOW_ERROR_TYPE_ACTION,
945                                 act, "Not supported action.");
946                 return -rte_errno;
947         }
948
949         /* check if the next not void item is END */
950         act = next_no_void_action(actions, act);
951         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
952                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
953                 rte_flow_error_set(error, EINVAL,
954                                 RTE_FLOW_ERROR_TYPE_ACTION,
955                                 act, "Not supported action.");
956                 return -rte_errno;
957         }
958
959         /* parse attr */
960         /* must be input direction */
961         if (!attr->ingress) {
962                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
963                 rte_flow_error_set(error, EINVAL,
964                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
965                         attr, "Only support ingress.");
966                 return -rte_errno;
967         }
968
969         /* not supported */
970         if (attr->egress) {
971                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
972                 rte_flow_error_set(error, EINVAL,
973                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
974                         attr, "Not support egress.");
975                 return -rte_errno;
976         }
977
978         /* not supported */
979         if (attr->transfer) {
980                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
981                 rte_flow_error_set(error, EINVAL,
982                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
983                         attr, "No support for transfer.");
984                 return -rte_errno;
985         }
986
987         /* Support 2 priorities, the lowest or highest. */
988         if (!attr->priority) {
989                 filter->hig_pri = 0;
990         } else if (attr->priority == (uint32_t)~0U) {
991                 filter->hig_pri = 1;
992         } else {
993                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
994                 rte_flow_error_set(error, EINVAL,
995                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
996                         attr, "Not support priority.");
997                 return -rte_errno;
998         }
999
1000         return 0;
1001 }
1002
1003 static int
1004 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1005                              const struct rte_flow_attr *attr,
1006                              const struct rte_flow_item pattern[],
1007                              const struct rte_flow_action actions[],
1008                              struct rte_eth_syn_filter *filter,
1009                              struct rte_flow_error *error)
1010 {
1011         int ret;
1012
1013         ret = cons_parse_syn_filter(attr, pattern,
1014                                         actions, filter, error);
1015
1016         if (ret)
1017                 return ret;
1018
1019         if (filter->queue >= dev->data->nb_rx_queues)
1020                 return -rte_errno;
1021
1022         return 0;
1023 }
1024
1025 /**
1026  * Parse the rule to see if it is an L2 tunnel rule,
1027  * and fill in the L2 tunnel filter info along the way.
1028  * Only E-tag is supported for now.
1029  * pattern:
1030  * The first not void item can be E_TAG.
1031  * The next not void item must be END.
1032  * action:
1033  * The first not void action should be VF or PF.
1034  * The next not void action should be END.
1035  * pattern example:
1036  * ITEM         Spec                    Mask
1037  * E_TAG        grp             0x1     0x3
1038  *              e_cid_base      0x309   0xFFF
1039  * END
1040  * Other members in mask and spec should be set to 0x00.
1041  * item->last should be NULL.
1042  */
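/*
 * Illustrative sketch (not part of the driver): the E-tag example above,
 * redirected to VF 1, might be built as follows (the bit layout of
 * rsvd_grp_ecid_b, grp in bits 13:12 and e_cid_base in bits 11:0, is an
 * assumption based on the item definition):
 *
 *     struct rte_flow_item_e_tag e_tag_spec = { 0 }, e_tag_mask = { 0 };
 *     struct rte_flow_action_vf vf = { .id = 1 };
 *
 *     e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
 *     e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *               .spec = &e_tag_spec, .mask = &e_tag_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */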
1043 static int
1044 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1045                         const struct rte_flow_attr *attr,
1046                         const struct rte_flow_item pattern[],
1047                         const struct rte_flow_action actions[],
1048                         struct txgbe_l2_tunnel_conf *filter,
1049                         struct rte_flow_error *error)
1050 {
1051         const struct rte_flow_item *item;
1052         const struct rte_flow_item_e_tag *e_tag_spec;
1053         const struct rte_flow_item_e_tag *e_tag_mask;
1054         const struct rte_flow_action *act;
1055         const struct rte_flow_action_vf *act_vf;
1056         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1057
1058         if (!pattern) {
1059                 rte_flow_error_set(error, EINVAL,
1060                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1061                         NULL, "NULL pattern.");
1062                 return -rte_errno;
1063         }
1064
1065         if (!actions) {
1066                 rte_flow_error_set(error, EINVAL,
1067                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1068                                    NULL, "NULL action.");
1069                 return -rte_errno;
1070         }
1071
1072         if (!attr) {
1073                 rte_flow_error_set(error, EINVAL,
1074                                    RTE_FLOW_ERROR_TYPE_ATTR,
1075                                    NULL, "NULL attribute.");
1076                 return -rte_errno;
1077         }
1078
1079         /* The first not void item should be e-tag. */
1080         item = next_no_void_pattern(pattern, NULL);
1081         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1082                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1083                 rte_flow_error_set(error, EINVAL,
1084                         RTE_FLOW_ERROR_TYPE_ITEM,
1085                         item, "Not supported by L2 tunnel filter");
1086                 return -rte_errno;
1087         }
1088
1089         if (!item->spec || !item->mask) {
1090                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1091                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1092                         item, "Not supported by L2 tunnel filter");
1093                 return -rte_errno;
1094         }
1095
1096         /*Not supported last point for range*/
1097         if (item->last) {
1098                 rte_flow_error_set(error, EINVAL,
1099                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1100                         item, "Not supported last point for range");
1101                 return -rte_errno;
1102         }
1103
1104         e_tag_spec = item->spec;
1105         e_tag_mask = item->mask;
1106
1107         /* Only care about GRP and E cid base. */
1108         if (e_tag_mask->epcp_edei_in_ecid_b ||
1109             e_tag_mask->in_ecid_e ||
1110             e_tag_mask->ecid_e ||
1111             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1112                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1113                 rte_flow_error_set(error, EINVAL,
1114                         RTE_FLOW_ERROR_TYPE_ITEM,
1115                         item, "Not supported by L2 tunnel filter");
1116                 return -rte_errno;
1117         }
1118
1119         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1120         /**
1121          * grp and e_cid_base are bit fields and only use 14 bits.
1122          * e-tag id is taken as little endian by HW.
1123          */
1124         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1125
1126         /* check if the next not void item is END */
1127         item = next_no_void_pattern(pattern, item);
1128         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1129                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1130                 rte_flow_error_set(error, EINVAL,
1131                         RTE_FLOW_ERROR_TYPE_ITEM,
1132                         item, "Not supported by L2 tunnel filter");
1133                 return -rte_errno;
1134         }
1135
1136         /* parse attr */
1137         /* must be input direction */
1138         if (!attr->ingress) {
1139                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1140                 rte_flow_error_set(error, EINVAL,
1141                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1142                         attr, "Only support ingress.");
1143                 return -rte_errno;
1144         }
1145
1146         /* not supported */
1147         if (attr->egress) {
1148                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1149                 rte_flow_error_set(error, EINVAL,
1150                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1151                         attr, "Not support egress.");
1152                 return -rte_errno;
1153         }
1154
1155         /* not supported */
1156         if (attr->transfer) {
1157                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1158                 rte_flow_error_set(error, EINVAL,
1159                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1160                         attr, "No support for transfer.");
1161                 return -rte_errno;
1162         }
1163
1164         /* not supported */
1165         if (attr->priority) {
1166                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1167                 rte_flow_error_set(error, EINVAL,
1168                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1169                         attr, "Not support priority.");
1170                 return -rte_errno;
1171         }
1172
1173         /* check if the first not void action is VF or PF. */
1174         act = next_no_void_action(actions, NULL);
1175         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1176                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1177                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1178                 rte_flow_error_set(error, EINVAL,
1179                         RTE_FLOW_ERROR_TYPE_ACTION,
1180                         act, "Not supported action.");
1181                 return -rte_errno;
1182         }
1183
1184         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1185                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1186                 filter->pool = act_vf->id;
1187         } else {
1188                 filter->pool = pci_dev->max_vfs;
1189         }
1190
1191         /* check if the next not void item is END */
1192         act = next_no_void_action(actions, act);
1193         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1194                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1195                 rte_flow_error_set(error, EINVAL,
1196                         RTE_FLOW_ERROR_TYPE_ACTION,
1197                         act, "Not supported action.");
1198                 return -rte_errno;
1199         }
1200
1201         return 0;
1202 }
1203
1204 static int
1205 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1206                         const struct rte_flow_attr *attr,
1207                         const struct rte_flow_item pattern[],
1208                         const struct rte_flow_action actions[],
1209                         struct txgbe_l2_tunnel_conf *l2_tn_filter,
1210                         struct rte_flow_error *error)
1211 {
1212         int ret = 0;
1213         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1214         uint16_t vf_num;
1215
1216         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1217                                 actions, l2_tn_filter, error);
1218
1219         vf_num = pci_dev->max_vfs;
1220
1221         if (l2_tn_filter->pool > vf_num)
1222                 return -rte_errno;
1223
1224         return ret;
1225 }
1226
1227 /**
1228  * Create or destroy a flow rule.
1229  * Theoretically one rule can match more than one filter.
1230  * We will let it use the first filter it hits,
1231  * so the sequence matters.
1232  */
1233 static struct rte_flow *
1234 txgbe_flow_create(struct rte_eth_dev *dev,
1235                   const struct rte_flow_attr *attr,
1236                   const struct rte_flow_item pattern[],
1237                   const struct rte_flow_action actions[],
1238                   struct rte_flow_error *error)
1239 {
1240         struct rte_flow *flow = NULL;
1241         return flow;
1242 }
1243
1244 /**
1245  * Check if the flow rule is supported by txgbe.
1246  * It only checks the format; it doesn't guarantee that the rule can be
1247  * programmed into the HW, because there may not be enough room for it.
1248  */
1249 static int
1250 txgbe_flow_validate(struct rte_eth_dev *dev,
1251                 const struct rte_flow_attr *attr,
1252                 const struct rte_flow_item pattern[],
1253                 const struct rte_flow_action actions[],
1254                 struct rte_flow_error *error)
1255 {
1256         struct rte_eth_ntuple_filter ntuple_filter;
1257         struct rte_eth_ethertype_filter ethertype_filter;
1258         struct rte_eth_syn_filter syn_filter;
1259         struct txgbe_l2_tunnel_conf l2_tn_filter;
1260         int ret = 0;
1261
1262         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1263         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
1264                                 actions, &ntuple_filter, error);
1265         if (!ret)
1266                 return 0;
1267
1268         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1269         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
1270                                 actions, &ethertype_filter, error);
1271         if (!ret)
1272                 return 0;
1273
1274         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1275         ret = txgbe_parse_syn_filter(dev, attr, pattern,
1276                                 actions, &syn_filter, error);
1277         if (!ret)
1278                 return 0;
1279
1280         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1281         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
1282                                 actions, &l2_tn_filter, error);
1283         if (!ret)
1284                 return 0;
1285
1286         return ret;
1287 }
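/*
 * Illustrative sketch (not part of the driver): an application reaches the
 * txgbe_flow_* callbacks in this file through the generic rte_flow API,
 * typically validating a rule before creating it, e.g.:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = NULL;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */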
1288
1289 /* Destroy a flow rule on txgbe. */
1290 static int
1291 txgbe_flow_destroy(struct rte_eth_dev *dev,
1292                 struct rte_flow *flow,
1293                 struct rte_flow_error *error)
1294 {
1295         int ret = 0;
1296
1297         return ret;
1298 }
1299
1300 /*  Destroy all flow rules associated with a port on txgbe. */
1301 static int
1302 txgbe_flow_flush(struct rte_eth_dev *dev,
1303                 struct rte_flow_error *error)
1304 {
1305         int ret = 0;
1306
1307         return ret;
1308 }
1309
1310 const struct rte_flow_ops txgbe_flow_ops = {
1311         .validate = txgbe_flow_validate,
1312         .create = txgbe_flow_create,
1313         .destroy = txgbe_flow_destroy,
1314         .flush = txgbe_flow_flush,
1315 };
1316