[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
79                 struct rte_flow_error *error);
80 static int
81 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
82                                         const struct rte_flow_item pattern[],
83                                         const struct rte_flow_action actions[],
84                                         struct rte_eth_ntuple_filter *filter,
85                                         struct rte_flow_error *error);
86 static int
87 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_eth_ntuple_filter *filter,
91                                         struct rte_flow_error *error);
92 static int
93 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
94                             const struct rte_flow_item *pattern,
95                             const struct rte_flow_action *actions,
96                             struct rte_eth_ethertype_filter *filter,
97                             struct rte_flow_error *error);
98 static int
99 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
100                                 const struct rte_flow_item pattern[],
101                                 const struct rte_flow_action actions[],
102                                 struct rte_eth_ethertype_filter *filter,
103                                 struct rte_flow_error *error);
104 static int
105 cons_parse_syn_filter(const struct rte_flow_attr *attr,
106                 const struct rte_flow_item pattern[],
107                 const struct rte_flow_action actions[],
108                 struct rte_eth_syn_filter *filter,
109                 struct rte_flow_error *error);
110 static int
111 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
112                                 const struct rte_flow_item pattern[],
113                                 const struct rte_flow_action actions[],
114                                 struct rte_eth_syn_filter *filter,
115                                 struct rte_flow_error *error);
116 static int
117 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
118                 const struct rte_flow_item pattern[],
119                 const struct rte_flow_action actions[],
120                 struct rte_eth_l2_tunnel_conf *filter,
121                 struct rte_flow_error *error);
122 static int
123 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
124                         const struct rte_flow_attr *attr,
125                         const struct rte_flow_item pattern[],
126                         const struct rte_flow_action actions[],
127                         struct rte_eth_l2_tunnel_conf *rule,
128                         struct rte_flow_error *error);
129 static int
130 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
131                         const struct rte_flow_attr *attr,
132                         const struct rte_flow_item pattern[],
133                         const struct rte_flow_action actions[],
134                         struct ixgbe_fdir_rule *rule,
135                         struct rte_flow_error *error);
136 static int
137 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
138                 const struct rte_flow_item pattern[],
139                 const struct rte_flow_action actions[],
140                 struct ixgbe_fdir_rule *rule,
141                 struct rte_flow_error *error);
142 static int
143 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
144                 const struct rte_flow_item pattern[],
145                 const struct rte_flow_action actions[],
146                 struct ixgbe_fdir_rule *rule,
147                 struct rte_flow_error *error);
148 static int
149 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
150                 const struct rte_flow_item pattern[],
151                 const struct rte_flow_action actions[],
152                 struct ixgbe_fdir_rule *rule,
153                 struct rte_flow_error *error);
154 static int
155 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
156                 const struct rte_flow_attr *attr,
157                 const struct rte_flow_item pattern[],
158                 const struct rte_flow_action actions[],
159                 struct rte_flow_error *error);
160 static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
161                 const struct rte_flow_attr *attr,
162                 const struct rte_flow_item pattern[],
163                 const struct rte_flow_action actions[],
164                 struct rte_flow_error *error);
165
166 const struct rte_flow_ops ixgbe_flow_ops = {
167         ixgbe_flow_validate,
168         ixgbe_flow_create,
169         NULL,
170         ixgbe_flow_flush,
171         NULL,
172 };
173
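/*
 * Illustrative sketch (not part of the original source): the ops table above
 * is reached through the generic rte_flow API, so applications never call
 * ixgbe_flow_validate()/ixgbe_flow_create() directly.  The helper name below
 * is hypothetical, and the port_id width (uint8_t) follows the DPDK release
 * this file belongs to; later releases widen it.
 */
static struct rte_flow *
example_install_flow(uint8_t port_id,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/* Dry run; on ixgbe this dispatches to ixgbe_flow_validate(). */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;

	/* Program the rule; on ixgbe this dispatches to ixgbe_flow_create(). */
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}
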
174 #define IXGBE_MIN_N_TUPLE_PRIO 1
175 #define IXGBE_MAX_N_TUPLE_PRIO 7
176 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
177         do {                                            \
178                 item = pattern + index;                 \
179                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
180                         index++;                        \
181                         item = pattern + index;         \
182                 }                                       \
183         } while (0)
184
185 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
186         do {                                            \
187                 act = actions + index;                  \
188                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
189                         index++;                        \
190                         act = actions + index;          \
191                 }                                       \
192         } while (0)
193
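/*
 * Illustrative sketch (not part of the original source): with the
 * hypothetical pattern below, NEXT_ITEM_OF_PATTERN(item, pattern, index)
 * starting from index 0 leaves item pointing at the IPV4 entry, because
 * the leading VOID entries are skipped.
 */
static const struct rte_flow_item example_void_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
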
194 /**
195  * Please be aware of an assumption shared by all the parsers:
196  * rte_flow_item uses big endian, while rte_flow_attr and
197  * rte_flow_action use CPU byte order.
198  * Because the pattern describes packets, it normally follows
199  * network byte order.
200  */
201
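/*
 * Illustrative sketch (not part of the original source): pattern spec/mask
 * fields therefore have to be written in network order, while action fields
 * such as the queue index stay in CPU order.  The function name and values
 * are hypothetical.
 */
static void
example_byte_order(struct rte_flow_item_udp *udp_spec,
		   struct rte_flow_action_queue *queue)
{
	/* Item field: big endian, as carried on the wire. */
	udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
	/* Action field: plain CPU order. */
	queue->index = 3;
}
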
202 /**
203  * Parse the rule to see if it is an n-tuple rule,
204  * and get the n-tuple filter info if it is.
205  * pattern:
206  * The first not void item can be ETH or IPV4.
207  * The second not void item must be IPV4 if the first one is ETH.
208  * The third not void item must be UDP or TCP.
209  * The next not void item must be END.
210  * action:
211  * The first not void action should be QUEUE.
212  * The next not void action should be END.
213  * pattern example:
214  * ITEM         Spec                    Mask
215  * ETH          NULL                    NULL
216  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
217  *              dst_addr 192.167.3.50   0xFFFFFFFF
218  *              next_proto_id   17      0xFF
219  * UDP/TCP      src_port        80      0xFFFF
220  *              dst_port        80      0xFFFF
221  * END
222  * other members in mask and spec should be set to 0x00.
223  * item->last should be NULL.
224  */
225 static int
226 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
227                          const struct rte_flow_item pattern[],
228                          const struct rte_flow_action actions[],
229                          struct rte_eth_ntuple_filter *filter,
230                          struct rte_flow_error *error)
231 {
232         const struct rte_flow_item *item;
233         const struct rte_flow_action *act;
234         const struct rte_flow_item_ipv4 *ipv4_spec;
235         const struct rte_flow_item_ipv4 *ipv4_mask;
236         const struct rte_flow_item_tcp *tcp_spec;
237         const struct rte_flow_item_tcp *tcp_mask;
238         const struct rte_flow_item_udp *udp_spec;
239         const struct rte_flow_item_udp *udp_mask;
240         uint32_t index;
241
242         if (!pattern) {
243                 rte_flow_error_set(error,
244                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
245                         NULL, "NULL pattern.");
246                 return -rte_errno;
247         }
248
249         if (!actions) {
250                 rte_flow_error_set(error, EINVAL,
251                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
252                                    NULL, "NULL action.");
253                 return -rte_errno;
254         }
255         if (!attr) {
256                 rte_flow_error_set(error, EINVAL,
257                                    RTE_FLOW_ERROR_TYPE_ATTR,
258                                    NULL, "NULL attribute.");
259                 return -rte_errno;
260         }
261
262         /* parse pattern */
263         index = 0;
264
265         /* the first not void item can be MAC or IPv4 */
266         NEXT_ITEM_OF_PATTERN(item, pattern, index);
267
268         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
269             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
270                 rte_flow_error_set(error, EINVAL,
271                         RTE_FLOW_ERROR_TYPE_ITEM,
272                         item, "Not supported by ntuple filter");
273                 return -rte_errno;
274         }
275         /* Skip Ethernet */
276         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
277                 /*Not supported last point for range*/
278                 if (item->last) {
279                         rte_flow_error_set(error,
280                           EINVAL,
281                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
282                           item, "Not supported last point for range");
283                         return -rte_errno;
284
285                 }
286                 /* if the first item is MAC, the content should be NULL */
287                 if (item->spec || item->mask) {
288                         rte_flow_error_set(error, EINVAL,
289                                 RTE_FLOW_ERROR_TYPE_ITEM,
290                                 item, "Not supported by ntuple filter");
291                         return -rte_errno;
292                 }
293                 /* check if the next not void item is IPv4 */
294                 index++;
295                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
296                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
297                         rte_flow_error_set(error,
298                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
299                           item, "Not supported by ntuple filter");
300                           return -rte_errno;
301                 }
302         }
303
304         /* get the IPv4 info */
305         if (!item->spec || !item->mask) {
306                 rte_flow_error_set(error, EINVAL,
307                         RTE_FLOW_ERROR_TYPE_ITEM,
308                         item, "Invalid ntuple mask");
309                 return -rte_errno;
310         }
311         /*Not supported last point for range*/
312         if (item->last) {
313                 rte_flow_error_set(error, EINVAL,
314                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
315                         item, "Not supported last point for range");
316                 return -rte_errno;
317
318         }
319
320         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
321         /**
322          * Only support src & dst addresses, protocol,
323          * others should be masked.
324          */
325         if (ipv4_mask->hdr.version_ihl ||
326             ipv4_mask->hdr.type_of_service ||
327             ipv4_mask->hdr.total_length ||
328             ipv4_mask->hdr.packet_id ||
329             ipv4_mask->hdr.fragment_offset ||
330             ipv4_mask->hdr.time_to_live ||
331             ipv4_mask->hdr.hdr_checksum) {
332                         rte_flow_error_set(error,
333                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
334                         item, "Not supported by ntuple filter");
335                 return -rte_errno;
336         }
337
338         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
339         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
340         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
341
342         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
343         filter->dst_ip = ipv4_spec->hdr.dst_addr;
344         filter->src_ip = ipv4_spec->hdr.src_addr;
345         filter->proto  = ipv4_spec->hdr.next_proto_id;
346
347         /* check if the next not void item is TCP or UDP */
348         index++;
349         NEXT_ITEM_OF_PATTERN(item, pattern, index);
350         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
351             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
352                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
353                 rte_flow_error_set(error, EINVAL,
354                         RTE_FLOW_ERROR_TYPE_ITEM,
355                         item, "Not supported by ntuple filter");
356                 return -rte_errno;
357         }
358
359         /* get the TCP/UDP info */
360         if (!item->spec || !item->mask) {
361                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
362                 rte_flow_error_set(error, EINVAL,
363                         RTE_FLOW_ERROR_TYPE_ITEM,
364                         item, "Invalid ntuple mask");
365                 return -rte_errno;
366         }
367
368         /*Not supported last point for range*/
369         if (item->last) {
370                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
371                 rte_flow_error_set(error, EINVAL,
372                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
373                         item, "Not supported last point for range");
374                 return -rte_errno;
375
376         }
377
378         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
379                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
380
381                 /**
382                  * Only support src & dst ports, tcp flags,
383                  * others should be masked.
384                  */
385                 if (tcp_mask->hdr.sent_seq ||
386                     tcp_mask->hdr.recv_ack ||
387                     tcp_mask->hdr.data_off ||
388                     tcp_mask->hdr.rx_win ||
389                     tcp_mask->hdr.cksum ||
390                     tcp_mask->hdr.tcp_urp) {
391                         memset(filter, 0,
392                                 sizeof(struct rte_eth_ntuple_filter));
393                         rte_flow_error_set(error, EINVAL,
394                                 RTE_FLOW_ERROR_TYPE_ITEM,
395                                 item, "Not supported by ntuple filter");
396                         return -rte_errno;
397                 }
398
399                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
400                 filter->src_port_mask  = tcp_mask->hdr.src_port;
401                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
402                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
403                 } else if (!tcp_mask->hdr.tcp_flags) {
404                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
405                 } else {
406                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
407                         rte_flow_error_set(error, EINVAL,
408                                 RTE_FLOW_ERROR_TYPE_ITEM,
409                                 item, "Not supported by ntuple filter");
410                         return -rte_errno;
411                 }
412
413                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
414                 filter->dst_port  = tcp_spec->hdr.dst_port;
415                 filter->src_port  = tcp_spec->hdr.src_port;
416                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
417         } else {
418                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
419
420                 /**
421                  * Only support src & dst ports,
422                  * others should be masked.
423                  */
424                 if (udp_mask->hdr.dgram_len ||
425                     udp_mask->hdr.dgram_cksum) {
426                         memset(filter, 0,
427                                 sizeof(struct rte_eth_ntuple_filter));
428                         rte_flow_error_set(error, EINVAL,
429                                 RTE_FLOW_ERROR_TYPE_ITEM,
430                                 item, "Not supported by ntuple filter");
431                         return -rte_errno;
432                 }
433
434                 filter->dst_port_mask = udp_mask->hdr.dst_port;
435                 filter->src_port_mask = udp_mask->hdr.src_port;
436
437                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
438                 filter->dst_port = udp_spec->hdr.dst_port;
439                 filter->src_port = udp_spec->hdr.src_port;
440         }
441
442         /* check if the next not void item is END */
443         index++;
444         NEXT_ITEM_OF_PATTERN(item, pattern, index);
445         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
446                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
447                 rte_flow_error_set(error, EINVAL,
448                         RTE_FLOW_ERROR_TYPE_ITEM,
449                         item, "Not supported by ntuple filter");
450                 return -rte_errno;
451         }
452
453         /* parse action */
454         index = 0;
455
456         /**
457          * n-tuple only supports forwarding,
458          * check if the first not void action is QUEUE.
459          */
460         NEXT_ITEM_OF_ACTION(act, actions, index);
461         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
462                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
463                 rte_flow_error_set(error, EINVAL,
464                         RTE_FLOW_ERROR_TYPE_ACTION,
465                         item, "Not supported action.");
466                 return -rte_errno;
467         }
468         filter->queue =
469                 ((const struct rte_flow_action_queue *)act->conf)->index;
470
471         /* check if the next not void item is END */
472         index++;
473         NEXT_ITEM_OF_ACTION(act, actions, index);
474         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
475                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
476                 rte_flow_error_set(error, EINVAL,
477                         RTE_FLOW_ERROR_TYPE_ACTION,
478                         act, "Not supported action.");
479                 return -rte_errno;
480         }
481
482         /* parse attr */
483         /* must be input direction */
484         if (!attr->ingress) {
485                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
486                 rte_flow_error_set(error, EINVAL,
487                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
488                                    attr, "Only support ingress.");
489                 return -rte_errno;
490         }
491
492         /* not supported */
493         if (attr->egress) {
494                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
495                 rte_flow_error_set(error, EINVAL,
496                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
497                                    attr, "Not support egress.");
498                 return -rte_errno;
499         }
500
501         if (attr->priority > 0xFFFF) {
502                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
503                 rte_flow_error_set(error, EINVAL,
504                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
505                                    attr, "Error priority.");
506                 return -rte_errno;
507         }
508         filter->priority = (uint16_t)attr->priority;
509         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
510             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
511             filter->priority = 1;
512
513         return 0;
514 }
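
/*
 * Illustrative sketch (not part of the original source): a rule that the
 * parser above accepts, matching the example in its comment (UDP from
 * 192.168.1.20 to 192.167.3.50, port 80 to 80, forwarded to queue 1).
 * The function name and all values are hypothetical.
 */
static int
example_build_ntuple_rule(struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
		.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		.next_proto_id = IPPROTO_UDP,
	} };
	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
		.src_addr = UINT32_MAX,
		.dst_addr = UINT32_MAX,
		.next_proto_id = UINT8_MAX,
	} };
	struct rte_flow_item_udp udp_spec = { .hdr = {
		.src_port = rte_cpu_to_be_16(80),
		.dst_port = rte_cpu_to_be_16(80),
	} };
	struct rte_flow_item_udp udp_mask = { .hdr = {
		.src_port = UINT16_MAX,
		.dst_port = UINT16_MAX,
	} };
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };

	return cons_parse_ntuple_filter(&attr, pattern, actions,
					filter, error);
}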
515
516 /* a specific function for ixgbe because the flags are specific */
517 static int
518 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
519                           const struct rte_flow_item pattern[],
520                           const struct rte_flow_action actions[],
521                           struct rte_eth_ntuple_filter *filter,
522                           struct rte_flow_error *error)
523 {
524         int ret;
525
526         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
527
528         if (ret)
529                 return ret;
530
531         /* Ixgbe doesn't support tcp flags. */
532         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
533                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
534                 rte_flow_error_set(error, EINVAL,
535                                    RTE_FLOW_ERROR_TYPE_ITEM,
536                                    NULL, "Not supported by ntuple filter");
537                 return -rte_errno;
538         }
539
540         /* Ixgbe doesn't support many priorities. */
541         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
542             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
543                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
544                 rte_flow_error_set(error, EINVAL,
545                         RTE_FLOW_ERROR_TYPE_ITEM,
546                         NULL, "Priority not supported by ntuple filter");
547                 return -rte_errno;
548         }
549
550         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
551                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
552                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
553                 return -rte_errno;
554
555         /* fixed value for ixgbe */
556         filter->flags = RTE_5TUPLE_FLAGS;
557         return 0;
558 }
559
560 /**
561  * Parse the rule to see if it is an ethertype rule,
562  * and get the ethertype filter info if it is.
563  * pattern:
564  * The first not void item must be ETH.
565  * The next not void item must be END.
566  * action:
567  * The first not void action should be QUEUE.
568  * The next not void action should be END.
569  * pattern example:
570  * ITEM         Spec                    Mask
571  * ETH          type    0x0807          0xFFFF
572  * END
573  * other members in mask and spec should be set to 0x00.
574  * item->last should be NULL.
575  */
576 static int
577 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
578                             const struct rte_flow_item *pattern,
579                             const struct rte_flow_action *actions,
580                             struct rte_eth_ethertype_filter *filter,
581                             struct rte_flow_error *error)
582 {
583         const struct rte_flow_item *item;
584         const struct rte_flow_action *act;
585         const struct rte_flow_item_eth *eth_spec;
586         const struct rte_flow_item_eth *eth_mask;
587         const struct rte_flow_action_queue *act_q;
588         uint32_t index;
589
590         if (!pattern) {
591                 rte_flow_error_set(error, EINVAL,
592                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
593                                 NULL, "NULL pattern.");
594                 return -rte_errno;
595         }
596
597         if (!actions) {
598                 rte_flow_error_set(error, EINVAL,
599                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
600                                 NULL, "NULL action.");
601                 return -rte_errno;
602         }
603
604         if (!attr) {
605                 rte_flow_error_set(error, EINVAL,
606                                    RTE_FLOW_ERROR_TYPE_ATTR,
607                                    NULL, "NULL attribute.");
608                 return -rte_errno;
609         }
610
611         /* Parse pattern */
612         index = 0;
613
614         /* The first non-void item should be MAC. */
615         item = pattern + index;
616         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
617                 index++;
618                 item = pattern + index;
619         }
620         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
621                 rte_flow_error_set(error, EINVAL,
622                         RTE_FLOW_ERROR_TYPE_ITEM,
623                         item, "Not supported by ethertype filter");
624                 return -rte_errno;
625         }
626
627         /*Not supported last point for range*/
628         if (item->last) {
629                 rte_flow_error_set(error, EINVAL,
630                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
631                         item, "Not supported last point for range");
632                 return -rte_errno;
633         }
634
635         /* Get the MAC info. */
636         if (!item->spec || !item->mask) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ITEM,
639                                 item, "Not supported by ethertype filter");
640                 return -rte_errno;
641         }
642
643         eth_spec = (const struct rte_flow_item_eth *)item->spec;
644         eth_mask = (const struct rte_flow_item_eth *)item->mask;
645
646         /* Mask bits of source MAC address must be full of 0.
647          * Mask bits of destination MAC address must be full
648          * of 1 or full of 0.
649          */
650         if (!is_zero_ether_addr(&eth_mask->src) ||
651             (!is_zero_ether_addr(&eth_mask->dst) &&
652              !is_broadcast_ether_addr(&eth_mask->dst))) {
653                 rte_flow_error_set(error, EINVAL,
654                                 RTE_FLOW_ERROR_TYPE_ITEM,
655                                 item, "Invalid ether address mask");
656                 return -rte_errno;
657         }
658
659         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
660                 rte_flow_error_set(error, EINVAL,
661                                 RTE_FLOW_ERROR_TYPE_ITEM,
662                                 item, "Invalid ethertype mask");
663                 return -rte_errno;
664         }
665
666         /* If mask bits of destination MAC address
667          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
668          */
669         if (is_broadcast_ether_addr(&eth_mask->dst)) {
670                 filter->mac_addr = eth_spec->dst;
671                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
672         } else {
673                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
674         }
675         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
676
677         /* Check if the next non-void item is END. */
678         index++;
679         item = pattern + index;
680         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
681                 index++;
682                 item = pattern + index;
683         }
684         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
685                 rte_flow_error_set(error, EINVAL,
686                                 RTE_FLOW_ERROR_TYPE_ITEM,
687                                 item, "Not supported by ethertype filter.");
688                 return -rte_errno;
689         }
690
691         /* Parse action */
692
693         index = 0;
694         /* Check if the first non-void action is QUEUE or DROP. */
695         act = actions + index;
696         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
697                 index++;
698                 act = actions + index;
699         }
700         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
701             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
702                 rte_flow_error_set(error, EINVAL,
703                                 RTE_FLOW_ERROR_TYPE_ACTION,
704                                 act, "Not supported action.");
705                 return -rte_errno;
706         }
707
708         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
709                 act_q = (const struct rte_flow_action_queue *)act->conf;
710                 filter->queue = act_q->index;
711         } else {
712                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
713         }
714
715         /* Check if the next non-void item is END */
716         index++;
717         act = actions + index;
718         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
719                 index++;
720                 act = actions + index;
721         }
722         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
723                 rte_flow_error_set(error, EINVAL,
724                                 RTE_FLOW_ERROR_TYPE_ACTION,
725                                 act, "Not supported action.");
726                 return -rte_errno;
727         }
728
729         /* Parse attr */
730         /* Must be input direction */
731         if (!attr->ingress) {
732                 rte_flow_error_set(error, EINVAL,
733                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
734                                 attr, "Only support ingress.");
735                 return -rte_errno;
736         }
737
738         /* Not supported */
739         if (attr->egress) {
740                 rte_flow_error_set(error, EINVAL,
741                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
742                                 attr, "Not support egress.");
743                 return -rte_errno;
744         }
745
746         /* Not supported */
747         if (attr->priority) {
748                 rte_flow_error_set(error, EINVAL,
749                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
750                                 attr, "Not support priority.");
751                 return -rte_errno;
752         }
753
754         /* Not supported */
755         if (attr->group) {
756                 rte_flow_error_set(error, EINVAL,
757                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
758                                 attr, "Not support group.");
759                 return -rte_errno;
760         }
761
762         return 0;
763 }
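
/*
 * Illustrative sketch (not part of the original source): an ethertype rule
 * the parser above accepts, steering ARP frames (ether type 0x0806) to
 * queue 2.  Both MAC masks stay all zero, so no MAC compare is requested.
 * The function name and values are hypothetical.
 */
static int
example_build_ethertype_rule(struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(ETHER_TYPE_ARP),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = rte_cpu_to_be_16(0xFFFF),
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return cons_parse_ethertype_filter(&attr, pattern, actions,
					   filter, error);
}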
764
765 static int
766 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
767                              const struct rte_flow_item pattern[],
768                              const struct rte_flow_action actions[],
769                              struct rte_eth_ethertype_filter *filter,
770                              struct rte_flow_error *error)
771 {
772         int ret;
773
774         ret = cons_parse_ethertype_filter(attr, pattern,
775                                         actions, filter, error);
776
777         if (ret)
778                 return ret;
779
780         /* Ixgbe doesn't support MAC address. */
781         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
782                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
783                 rte_flow_error_set(error, EINVAL,
784                         RTE_FLOW_ERROR_TYPE_ITEM,
785                         NULL, "Not supported by ethertype filter");
786                 return -rte_errno;
787         }
788
789         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
790                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
791                 rte_flow_error_set(error, EINVAL,
792                         RTE_FLOW_ERROR_TYPE_ITEM,
793                         NULL, "queue index much too big");
794                 return -rte_errno;
795         }
796
797         if (filter->ether_type == ETHER_TYPE_IPv4 ||
798                 filter->ether_type == ETHER_TYPE_IPv6) {
799                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
800                 rte_flow_error_set(error, EINVAL,
801                         RTE_FLOW_ERROR_TYPE_ITEM,
802                         NULL, "IPv4/IPv6 not supported by ethertype filter");
803                 return -rte_errno;
804         }
805
806         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
807                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
808                 rte_flow_error_set(error, EINVAL,
809                         RTE_FLOW_ERROR_TYPE_ITEM,
810                         NULL, "mac compare is unsupported");
811                 return -rte_errno;
812         }
813
814         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
815                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
816                 rte_flow_error_set(error, EINVAL,
817                         RTE_FLOW_ERROR_TYPE_ITEM,
818                         NULL, "drop option is unsupported");
819                 return -rte_errno;
820         }
821
822         return 0;
823 }
824
825 /**
826  * Parse the rule to see if it is a TCP SYN rule,
827  * and get the TCP SYN filter info if it is.
828  * pattern:
829  * The first not void item must be ETH.
830  * The second not void item must be IPV4 or IPV6.
831  * The third not void item must be TCP.
832  * The next not void item must be END.
833  * action:
834  * The first not void action should be QUEUE.
835  * The next not void action should be END.
836  * pattern example:
837  * ITEM         Spec                    Mask
838  * ETH          NULL                    NULL
839  * IPV4/IPV6    NULL                    NULL
840  * TCP          tcp_flags       0x02    0xFF
841  * END
842  * other members in mask and spec should be set to 0x00.
843  * item->last should be NULL.
844  */
845 static int
846 cons_parse_syn_filter(const struct rte_flow_attr *attr,
847                                 const struct rte_flow_item pattern[],
848                                 const struct rte_flow_action actions[],
849                                 struct rte_eth_syn_filter *filter,
850                                 struct rte_flow_error *error)
851 {
852         const struct rte_flow_item *item;
853         const struct rte_flow_action *act;
854         const struct rte_flow_item_tcp *tcp_spec;
855         const struct rte_flow_item_tcp *tcp_mask;
856         const struct rte_flow_action_queue *act_q;
857         uint32_t index;
858
859         if (!pattern) {
860                 rte_flow_error_set(error, EINVAL,
861                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
862                                 NULL, "NULL pattern.");
863                 return -rte_errno;
864         }
865
866         if (!actions) {
867                 rte_flow_error_set(error, EINVAL,
868                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
869                                 NULL, "NULL action.");
870                 return -rte_errno;
871         }
872
873         if (!attr) {
874                 rte_flow_error_set(error, EINVAL,
875                                    RTE_FLOW_ERROR_TYPE_ATTR,
876                                    NULL, "NULL attribute.");
877                 return -rte_errno;
878         }
879
880         /* parse pattern */
881         index = 0;
882
883         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
884         NEXT_ITEM_OF_PATTERN(item, pattern, index);
885         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
886             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
887             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
888             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
889                 rte_flow_error_set(error, EINVAL,
890                                 RTE_FLOW_ERROR_TYPE_ITEM,
891                                 item, "Not supported by syn filter");
892                 return -rte_errno;
893         }
894         /*Not supported last point for range*/
895         if (item->last) {
896                 rte_flow_error_set(error, EINVAL,
897                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
898                         item, "Not supported last point for range");
899                 return -rte_errno;
900         }
901
902         /* Skip Ethernet */
903         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
904                 /* if the item is MAC, the content should be NULL */
905                 if (item->spec || item->mask) {
906                         rte_flow_error_set(error, EINVAL,
907                                 RTE_FLOW_ERROR_TYPE_ITEM,
908                                 item, "Invalid SYN address mask");
909                         return -rte_errno;
910                 }
911
912                 /* check if the next not void item is IPv4 or IPv6 */
913                 index++;
914                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
915                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
916                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
917                         rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ITEM,
919                                 item, "Not supported by syn filter");
920                         return -rte_errno;
921                 }
922         }
923
924         /* Skip IP */
925         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
926             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
927                 /* if the item is IP, the content should be NULL */
928                 if (item->spec || item->mask) {
929                         rte_flow_error_set(error, EINVAL,
930                                 RTE_FLOW_ERROR_TYPE_ITEM,
931                                 item, "Invalid SYN mask");
932                         return -rte_errno;
933                 }
934
935                 /* check if the next not void item is TCP */
936                 index++;
937                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
938                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
939                         rte_flow_error_set(error, EINVAL,
940                                 RTE_FLOW_ERROR_TYPE_ITEM,
941                                 item, "Not supported by syn filter");
942                         return -rte_errno;
943                 }
944         }
945
946         /* Get the TCP info. Only support SYN. */
947         if (!item->spec || !item->mask) {
948                 rte_flow_error_set(error, EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ITEM,
950                                 item, "Invalid SYN mask");
951                 return -rte_errno;
952         }
953         /*Not supported last point for range*/
954         if (item->last) {
955                 rte_flow_error_set(error, EINVAL,
956                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
957                         item, "Not supported last point for range");
958                 return -rte_errno;
959         }
960
961         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
962         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
963         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
964             tcp_mask->hdr.src_port ||
965             tcp_mask->hdr.dst_port ||
966             tcp_mask->hdr.sent_seq ||
967             tcp_mask->hdr.recv_ack ||
968             tcp_mask->hdr.data_off ||
969             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
970             tcp_mask->hdr.rx_win ||
971             tcp_mask->hdr.cksum ||
972             tcp_mask->hdr.tcp_urp) {
973                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
974                 rte_flow_error_set(error, EINVAL,
975                                 RTE_FLOW_ERROR_TYPE_ITEM,
976                                 item, "Not supported by syn filter");
977                 return -rte_errno;
978         }
979
980         /* check if the next not void item is END */
981         index++;
982         NEXT_ITEM_OF_PATTERN(item, pattern, index);
983         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
984                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
985                 rte_flow_error_set(error, EINVAL,
986                                 RTE_FLOW_ERROR_TYPE_ITEM,
987                                 item, "Not supported by syn filter");
988                 return -rte_errno;
989         }
990
991         /* parse action */
992         index = 0;
993
994         /* check if the first not void action is QUEUE. */
995         NEXT_ITEM_OF_ACTION(act, actions, index);
996         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
997                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
998                 rte_flow_error_set(error, EINVAL,
999                                 RTE_FLOW_ERROR_TYPE_ACTION,
1000                                 act, "Not supported action.");
1001                 return -rte_errno;
1002         }
1003
1004         act_q = (const struct rte_flow_action_queue *)act->conf;
1005         filter->queue = act_q->index;
1006         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1007                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1008                 rte_flow_error_set(error, EINVAL,
1009                                 RTE_FLOW_ERROR_TYPE_ACTION,
1010                                 act, "Not supported action.");
1011                 return -rte_errno;
1012         }
1013
1014         /* check if the next not void item is END */
1015         index++;
1016         NEXT_ITEM_OF_ACTION(act, actions, index);
1017         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1018                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1019                 rte_flow_error_set(error, EINVAL,
1020                                 RTE_FLOW_ERROR_TYPE_ACTION,
1021                                 act, "Not supported action.");
1022                 return -rte_errno;
1023         }
1024
1025         /* parse attr */
1026         /* must be input direction */
1027         if (!attr->ingress) {
1028                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1029                 rte_flow_error_set(error, EINVAL,
1030                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1031                         attr, "Only support ingress.");
1032                 return -rte_errno;
1033         }
1034
1035         /* not supported */
1036         if (attr->egress) {
1037                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1038                 rte_flow_error_set(error, EINVAL,
1039                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1040                         attr, "Not support egress.");
1041                 return -rte_errno;
1042         }
1043
1044         /* Support 2 priorities, the lowest or highest. */
1045         if (!attr->priority) {
1046                 filter->hig_pri = 0;
1047         } else if (attr->priority == (uint32_t)~0U) {
1048                 filter->hig_pri = 1;
1049         } else {
1050                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1051                 rte_flow_error_set(error, EINVAL,
1052                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1053                         attr, "Not support priority.");
1054                 return -rte_errno;
1055         }
1056
1057         return 0;
1058 }
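
/*
 * Illustrative sketch (not part of the original source): a TCP SYN rule the
 * parser above accepts.  Only the SYN bit may be matched, so the TCP mask
 * sets tcp_flags to TCP_SYN_FLAG and nothing else, and the spec carries the
 * SYN bit as well.  The function name and values are hypothetical.
 */
static int
example_build_syn_rule(struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.tcp_flags = TCP_SYN_FLAG,
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.tcp_flags = TCP_SYN_FLAG,
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* Priority 0 requests the low SYN priority; ~0U would request high. */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };

	return cons_parse_syn_filter(&attr, pattern, actions, filter, error);
}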
1059
1060 static int
1061 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
1062                              const struct rte_flow_item pattern[],
1063                              const struct rte_flow_action actions[],
1064                              struct rte_eth_syn_filter *filter,
1065                              struct rte_flow_error *error)
1066 {
1067         int ret;
1068
1069         ret = cons_parse_syn_filter(attr, pattern,
1070                                         actions, filter, error);
1071
1072         if (ret)
1073                 return ret;
1074
1075         return 0;
1076 }
1077
1078 /**
1079  * Parse the rule to see if it is an L2 tunnel rule,
1080  * and get the L2 tunnel filter info if it is.
1081  * Only E-tag is supported now.
1082  * pattern:
1083  * The first not void item can be E_TAG.
1084  * The next not void item must be END.
1085  * action:
1086  * The first not void action should be QUEUE.
1087  * The next not void action should be END.
1088  * pattern example:
1089  * ITEM         Spec                    Mask
1090  * E_TAG        grp             0x1     0x3
1091  *              e_cid_base      0x309   0xFFF
1092  * END
1093  * other members in mask and spec should be set to 0x00.
1094  * item->last should be NULL.
1095  */
1096 static int
1097 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1098                         const struct rte_flow_item pattern[],
1099                         const struct rte_flow_action actions[],
1100                         struct rte_eth_l2_tunnel_conf *filter,
1101                         struct rte_flow_error *error)
1102 {
1103         const struct rte_flow_item *item;
1104         const struct rte_flow_item_e_tag *e_tag_spec;
1105         const struct rte_flow_item_e_tag *e_tag_mask;
1106         const struct rte_flow_action *act;
1107         const struct rte_flow_action_queue *act_q;
1108         uint32_t index;
1109
1110         if (!pattern) {
1111                 rte_flow_error_set(error, EINVAL,
1112                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1113                         NULL, "NULL pattern.");
1114                 return -rte_errno;
1115         }
1116
1117         if (!actions) {
1118                 rte_flow_error_set(error, EINVAL,
1119                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1120                                    NULL, "NULL action.");
1121                 return -rte_errno;
1122         }
1123
1124         if (!attr) {
1125                 rte_flow_error_set(error, EINVAL,
1126                                    RTE_FLOW_ERROR_TYPE_ATTR,
1127                                    NULL, "NULL attribute.");
1128                 return -rte_errno;
1129         }
1130         /* parse pattern */
1131         index = 0;
1132
1133         /* The first not void item should be e-tag. */
1134         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1135         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1136                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1137                 rte_flow_error_set(error, EINVAL,
1138                         RTE_FLOW_ERROR_TYPE_ITEM,
1139                         item, "Not supported by L2 tunnel filter");
1140                 return -rte_errno;
1141         }
1142
1143         if (!item->spec || !item->mask) {
1144                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1145                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1146                         item, "Not supported by L2 tunnel filter");
1147                 return -rte_errno;
1148         }
1149
1150         /*Not supported last point for range*/
1151         if (item->last) {
1152                 rte_flow_error_set(error, EINVAL,
1153                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1154                         item, "Not supported last point for range");
1155                 return -rte_errno;
1156         }
1157
1158         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1159         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1160
1161         /* Only care about GRP and E cid base. */
1162         if (e_tag_mask->epcp_edei_in_ecid_b ||
1163             e_tag_mask->in_ecid_e ||
1164             e_tag_mask->ecid_e ||
1165             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1166                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1167                 rte_flow_error_set(error, EINVAL,
1168                         RTE_FLOW_ERROR_TYPE_ITEM,
1169                         item, "Not supported by L2 tunnel filter");
1170                 return -rte_errno;
1171         }
1172
1173         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1174         /**
1175          * grp and e_cid_base are bit fields and only use 14 bits.
1176          * e-tag id is taken as little endian by HW.
1177          */
1178         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1179
1180         /* check if the next not void item is END */
1181         index++;
1182         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1183         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1184                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1185                 rte_flow_error_set(error, EINVAL,
1186                         RTE_FLOW_ERROR_TYPE_ITEM,
1187                         item, "Not supported by L2 tunnel filter");
1188                 return -rte_errno;
1189         }
1190
1191         /* parse attr */
1192         /* must be input direction */
1193         if (!attr->ingress) {
1194                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1195                 rte_flow_error_set(error, EINVAL,
1196                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1197                         attr, "Only support ingress.");
1198                 return -rte_errno;
1199         }
1200
1201         /* not supported */
1202         if (attr->egress) {
1203                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1204                 rte_flow_error_set(error, EINVAL,
1205                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1206                         attr, "Not support egress.");
1207                 return -rte_errno;
1208         }
1209
1210         /* not supported */
1211         if (attr->priority) {
1212                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1213                 rte_flow_error_set(error, EINVAL,
1214                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1215                         attr, "Not support priority.");
1216                 return -rte_errno;
1217         }
1218
1219         /* parse action */
1220         index = 0;
1221
1222         /* check if the first not void action is QUEUE. */
1223         NEXT_ITEM_OF_ACTION(act, actions, index);
1224         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1225                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1226                 rte_flow_error_set(error, EINVAL,
1227                         RTE_FLOW_ERROR_TYPE_ACTION,
1228                         act, "Not supported action.");
1229                 return -rte_errno;
1230         }
1231
1232         act_q = (const struct rte_flow_action_queue *)act->conf;
1233         filter->pool = act_q->index;
1234
1235         /* check if the next not void item is END */
1236         index++;
1237         NEXT_ITEM_OF_ACTION(act, actions, index);
1238         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1239                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1240                 rte_flow_error_set(error, EINVAL,
1241                         RTE_FLOW_ERROR_TYPE_ACTION,
1242                         act, "Not supported action.");
1243                 return -rte_errno;
1244         }
1245
1246         return 0;
1247 }
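
/*
 * Illustrative sketch (not part of the original source): an E-tag rule the
 * parser above accepts, matching the example in its comment (grp 0x1,
 * e_cid_base 0x309) and directing traffic to pool 0.  The 14-bit tag sits
 * in rsvd_grp_ecid_b in network order; the function name and values are
 * hypothetical.
 */
static int
example_build_e_tag_rule(struct rte_eth_l2_tunnel_conf *filter,
			 struct rte_flow_error *error)
{
	struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_action_queue pool = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &pool },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return cons_parse_l2_tn_filter(&attr, pattern, actions,
				       filter, error);
}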
1248
1249 static int
1250 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
1251                         const struct rte_flow_attr *attr,
1252                         const struct rte_flow_item pattern[],
1253                         const struct rte_flow_action actions[],
1254                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1255                         struct rte_flow_error *error)
1256 {
1257         int ret = 0;
1258         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1259
1260         ret = cons_parse_l2_tn_filter(attr, pattern,
1261                                 actions, l2_tn_filter, error);
1262
1263         if (hw->mac.type != ixgbe_mac_X550 &&
1264                 hw->mac.type != ixgbe_mac_X550EM_x &&
1265                 hw->mac.type != ixgbe_mac_X550EM_a) {
1266                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1267                 rte_flow_error_set(error, EINVAL,
1268                         RTE_FLOW_ERROR_TYPE_ITEM,
1269                         NULL, "Not supported by L2 tunnel filter");
1270                 return -rte_errno;
1271         }
1272
1273         return ret;
1274 }
1275
1276 /* Parse to get the attr and action info of flow director rule. */
1277 static int
1278 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1279                           const struct rte_flow_action actions[],
1280                           struct ixgbe_fdir_rule *rule,
1281                           struct rte_flow_error *error)
1282 {
1283         const struct rte_flow_action *act;
1284         const struct rte_flow_action_queue *act_q;
1285         const struct rte_flow_action_mark *mark;
1286         uint32_t index;
1287
1288         /* parse attr */
1289         /* must be input direction */
1290         if (!attr->ingress) {
1291                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1292                 rte_flow_error_set(error, EINVAL,
1293                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1294                         attr, "Only support ingress.");
1295                 return -rte_errno;
1296         }
1297
1298         /* not supported */
1299         if (attr->egress) {
1300                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1301                 rte_flow_error_set(error, EINVAL,
1302                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1303                         attr, "Not support egress.");
1304                 return -rte_errno;
1305         }
1306
1307         /* not supported */
1308         if (attr->priority) {
1309                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1310                 rte_flow_error_set(error, EINVAL,
1311                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1312                         attr, "Not support priority.");
1313                 return -rte_errno;
1314         }
1315
1316         /* parse action */
1317         index = 0;
1318
1319         /* check if the first not void action is QUEUE or DROP. */
1320         NEXT_ITEM_OF_ACTION(act, actions, index);
1321         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1322             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1323                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1324                 rte_flow_error_set(error, EINVAL,
1325                         RTE_FLOW_ERROR_TYPE_ACTION,
1326                         act, "Not supported action.");
1327                 return -rte_errno;
1328         }
1329
1330         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1331                 act_q = (const struct rte_flow_action_queue *)act->conf;
1332                 rule->queue = act_q->index;
1333         } else { /* drop */
1334                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1335         }
1336
1337         /* check if the next not void item is MARK */
1338         index++;
1339         NEXT_ITEM_OF_ACTION(act, actions, index);
1340         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1341                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1342                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1343                 rte_flow_error_set(error, EINVAL,
1344                         RTE_FLOW_ERROR_TYPE_ACTION,
1345                         act, "Not supported action.");
1346                 return -rte_errno;
1347         }
1348
1349         rule->soft_id = 0;
1350
1351         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1352                 mark = (const struct rte_flow_action_mark *)act->conf;
1353                 rule->soft_id = mark->id;
1354                 index++;
1355                 NEXT_ITEM_OF_ACTION(act, actions, index);
1356         }
1357
1358         /* check if the next not void item is END */
1359         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1360                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1361                 rte_flow_error_set(error, EINVAL,
1362                         RTE_FLOW_ERROR_TYPE_ACTION,
1363                         act, "Not supported action.");
1364                 return -rte_errno;
1365         }
1366
1367         return 0;
1368 }
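/*
 * Illustrative sketch, not part of the driver: an action list that
 * ixgbe_parse_fdir_act_attr() accepts -- QUEUE (or DROP), an optional MARK
 * carrying the soft id, then END. The queue index and mark id below are
 * placeholders.
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */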
1369
1370 /**
1371  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1372  * and fill in the flow director filter info along the way.
1373  * UDP/TCP/SCTP PATTERN:
1374  * The first not void item can be ETH or IPV4.
1375  * The second not void item must be IPV4 if the first one is ETH.
1376  * The third not void item must be UDP or TCP or SCTP.
1377  * The next not void item must be END.
1378  * MAC VLAN PATTERN:
1379  * The first not void item must be ETH.
1380  * The second not void item must be MAC VLAN.
1381  * The next not void item must be END.
1382  * ACTION:
1383  * The first not void action should be QUEUE or DROP.
1384  * The second not void optional action should be MARK,
1385  * mark_id is a uint32_t number.
1386  * The next not void action should be END.
1387  * UDP/TCP/SCTP pattern example:
1388  * ITEM         Spec                    Mask
1389  * ETH          NULL                    NULL
1390  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1391  *              dst_addr 192.167.3.50   0xFFFFFFFF
1392  * UDP/TCP/SCTP src_port        80      0xFFFF
1393  *              dst_port        80      0xFFFF
1394  * END
1395  * MAC VLAN pattern example:
1396  * ITEM         Spec                    Mask
1397  * ETH          dst_addr
1398  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1399  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1400  * MAC VLAN     tci     0x2016          0xFFFF
1401  *              tpid    0x8100          0xFFFF
1402  * END
1403  * Other members in mask and spec should be set to 0x00.
1404  * Item->last should be NULL.
1405  */
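/*
 * Illustrative sketch, not part of the driver: the UDP example from the
 * table above expressed as rte_flow items. The addresses and ports are the
 * sample values from the table; fields left zero in the masks are ignored.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */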
1406 static int
1407 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1408                                const struct rte_flow_item pattern[],
1409                                const struct rte_flow_action actions[],
1410                                struct ixgbe_fdir_rule *rule,
1411                                struct rte_flow_error *error)
1412 {
1413         const struct rte_flow_item *item;
1414         const struct rte_flow_item_eth *eth_spec;
1415         const struct rte_flow_item_eth *eth_mask;
1416         const struct rte_flow_item_ipv4 *ipv4_spec;
1417         const struct rte_flow_item_ipv4 *ipv4_mask;
1418         const struct rte_flow_item_tcp *tcp_spec;
1419         const struct rte_flow_item_tcp *tcp_mask;
1420         const struct rte_flow_item_udp *udp_spec;
1421         const struct rte_flow_item_udp *udp_mask;
1422         const struct rte_flow_item_sctp *sctp_spec;
1423         const struct rte_flow_item_sctp *sctp_mask;
1424         const struct rte_flow_item_vlan *vlan_spec;
1425         const struct rte_flow_item_vlan *vlan_mask;
1426
1427         uint32_t index, j;
1428
1429         if (!pattern) {
1430                 rte_flow_error_set(error, EINVAL,
1431                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1432                         NULL, "NULL pattern.");
1433                 return -rte_errno;
1434         }
1435
1436         if (!actions) {
1437                 rte_flow_error_set(error, EINVAL,
1438                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1439                                    NULL, "NULL action.");
1440                 return -rte_errno;
1441         }
1442
1443         if (!attr) {
1444                 rte_flow_error_set(error, EINVAL,
1445                                    RTE_FLOW_ERROR_TYPE_ATTR,
1446                                    NULL, "NULL attribute.");
1447                 return -rte_errno;
1448         }
1449
1450         /**
1451          * Some fields may not be provided. Set spec to 0 and mask to default
1452          * value. So, we need not do anything for the not provided fields later.
1453          */
1454         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1455         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1456         rule->mask.vlan_tci_mask = 0;
1457
1458         /* parse pattern */
1459         index = 0;
1460
1461         /**
1462          * The first not void item should be
1463          * MAC or IPv4 or TCP or UDP or SCTP.
1464          */
1465         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1466         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1467             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1468             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1469             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1470             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1471                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1472                 rte_flow_error_set(error, EINVAL,
1473                         RTE_FLOW_ERROR_TYPE_ITEM,
1474                         item, "Not supported by fdir filter");
1475                 return -rte_errno;
1476         }
1477
1478         rule->mode = RTE_FDIR_MODE_PERFECT;
1479
1480         /*Not supported last point for range*/
1481         if (item->last) {
1482                 rte_flow_error_set(error, EINVAL,
1483                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1484                         item, "Not supported last point for range");
1485                 return -rte_errno;
1486         }
1487
1488         /* Get the MAC info. */
1489         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1490                 /**
1491                  * Only support vlan and dst MAC address,
1492                  * others should be masked.
1493                  */
1494                 if (item->spec && !item->mask) {
1495                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1496                         rte_flow_error_set(error, EINVAL,
1497                                 RTE_FLOW_ERROR_TYPE_ITEM,
1498                                 item, "Not supported by fdir filter");
1499                         return -rte_errno;
1500                 }
1501
1502                 if (item->spec) {
1503                         rule->b_spec = TRUE;
1504                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1505
1506                         /* Get the dst MAC. */
1507                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1508                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1509                                         eth_spec->dst.addr_bytes[j];
1510                         }
1511                 }
1512
1513
1514                 if (item->mask) {
1515                         /* If ethernet has meaning, it means MAC VLAN mode. */
1516                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1517
1518                         rule->b_mask = TRUE;
1519                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1520
1521                         /* Ether type should be masked. */
1522                         if (eth_mask->type) {
1523                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1524                                 rte_flow_error_set(error, EINVAL,
1525                                         RTE_FLOW_ERROR_TYPE_ITEM,
1526                                         item, "Not supported by fdir filter");
1527                                 return -rte_errno;
1528                         }
1529
1530                         /**
1531                          * src MAC address must be masked,
1532                          * and masking the dst MAC address is not supported.
1533                          */
1534                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1535                                 if (eth_mask->src.addr_bytes[j] ||
1536                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1537                                         memset(rule, 0,
1538                                         sizeof(struct ixgbe_fdir_rule));
1539                                         rte_flow_error_set(error, EINVAL,
1540                                         RTE_FLOW_ERROR_TYPE_ITEM,
1541                                         item, "Not supported by fdir filter");
1542                                         return -rte_errno;
1543                                 }
1544                         }
1545
1546                         /* When no VLAN, considered as full mask. */
1547                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1548                 }
1549         /** If both spec and mask are NULL,
1550          * it means we don't care about ETH.
1551          * Do nothing.
1552          */
1553
1554                 /**
1555                  * Check if the next not void item is vlan or ipv4.
1556                  * IPv6 is not supported.
1557                  */
1558                 index++;
1559                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1560                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1561                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1562                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1563                                 rte_flow_error_set(error, EINVAL,
1564                                         RTE_FLOW_ERROR_TYPE_ITEM,
1565                                         item, "Not supported by fdir filter");
1566                                 return -rte_errno;
1567                         }
1568                 } else {
1569                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1570                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1571                                 rte_flow_error_set(error, EINVAL,
1572                                         RTE_FLOW_ERROR_TYPE_ITEM,
1573                                         item, "Not supported by fdir filter");
1574                                 return -rte_errno;
1575                         }
1576                 }
1577         }
1578
1579         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1580                 if (!(item->spec && item->mask)) {
1581                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1582                         rte_flow_error_set(error, EINVAL,
1583                                 RTE_FLOW_ERROR_TYPE_ITEM,
1584                                 item, "Not supported by fdir filter");
1585                         return -rte_errno;
1586                 }
1587
1588                 /*Not supported last point for range*/
1589                 if (item->last) {
1590                         rte_flow_error_set(error, EINVAL,
1591                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1592                                 item, "Not supported last point for range");
1593                         return -rte_errno;
1594                 }
1595
1596                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1597                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1598
1599                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1600                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1601                         rte_flow_error_set(error, EINVAL,
1602                                 RTE_FLOW_ERROR_TYPE_ITEM,
1603                                 item, "Not supported by fdir filter");
1604                         return -rte_errno;
1605                 }
1606
1607                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1608
1609                 if (vlan_mask->tpid != (uint16_t)~0U) {
1610                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1611                         rte_flow_error_set(error, EINVAL,
1612                                 RTE_FLOW_ERROR_TYPE_ITEM,
1613                                 item, "Not supported by fdir filter");
1614                         return -rte_errno;
1615                 }
1616                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1617                 /* More than one tag is not supported. */
1618
1619                 /**
1620                  * Check if the next not void item is not vlan.
1621                  */
1622                 index++;
1623                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1624                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1625                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1626                         rte_flow_error_set(error, EINVAL,
1627                                 RTE_FLOW_ERROR_TYPE_ITEM,
1628                                 item, "Not supported by fdir filter");
1629                         return -rte_errno;
1630                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1631                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1632                         rte_flow_error_set(error, EINVAL,
1633                                 RTE_FLOW_ERROR_TYPE_ITEM,
1634                                 item, "Not supported by fdir filter");
1635                         return -rte_errno;
1636                 }
1637         }
1638
1639         /* Get the IP info. */
1640         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1641                 /**
1642                  * Set the flow type even if there's no content
1643                  * as we must have a flow type.
1644                  */
1645                 rule->ixgbe_fdir.formatted.flow_type =
1646                         IXGBE_ATR_FLOW_TYPE_IPV4;
1647                 /*Not supported last point for range*/
1648                 if (item->last) {
1649                         rte_flow_error_set(error, EINVAL,
1650                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1651                                 item, "Not supported last point for range");
1652                         return -rte_errno;
1653                 }
1654                 /**
1655                  * Only care about src & dst addresses,
1656                  * others should be masked.
1657                  */
1658                 if (!item->mask) {
1659                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1660                         rte_flow_error_set(error, EINVAL,
1661                                 RTE_FLOW_ERROR_TYPE_ITEM,
1662                                 item, "Not supported by fdir filter");
1663                         return -rte_errno;
1664                 }
1665                 rule->b_mask = TRUE;
1666                 ipv4_mask =
1667                         (const struct rte_flow_item_ipv4 *)item->mask;
1668                 if (ipv4_mask->hdr.version_ihl ||
1669                     ipv4_mask->hdr.type_of_service ||
1670                     ipv4_mask->hdr.total_length ||
1671                     ipv4_mask->hdr.packet_id ||
1672                     ipv4_mask->hdr.fragment_offset ||
1673                     ipv4_mask->hdr.time_to_live ||
1674                     ipv4_mask->hdr.next_proto_id ||
1675                     ipv4_mask->hdr.hdr_checksum) {
1676                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1677                         rte_flow_error_set(error, EINVAL,
1678                                 RTE_FLOW_ERROR_TYPE_ITEM,
1679                                 item, "Not supported by fdir filter");
1680                         return -rte_errno;
1681                 }
1682                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1683                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1684
1685                 if (item->spec) {
1686                         rule->b_spec = TRUE;
1687                         ipv4_spec =
1688                                 (const struct rte_flow_item_ipv4 *)item->spec;
1689                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1690                                 ipv4_spec->hdr.dst_addr;
1691                         rule->ixgbe_fdir.formatted.src_ip[0] =
1692                                 ipv4_spec->hdr.src_addr;
1693                 }
1694
1695                 /**
1696                  * Check if the next not void item is
1697                  * TCP or UDP or SCTP or END.
1698                  */
1699                 index++;
1700                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1701                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1702                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1703                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1704                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1705                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1706                         rte_flow_error_set(error, EINVAL,
1707                                 RTE_FLOW_ERROR_TYPE_ITEM,
1708                                 item, "Not supported by fdir filter");
1709                         return -rte_errno;
1710                 }
1711         }
1712
1713         /* Get the TCP info. */
1714         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1715                 /**
1716                  * Set the flow type even if there's no content
1717                  * as we must have a flow type.
1718                  */
1719                 rule->ixgbe_fdir.formatted.flow_type =
1720                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1721                 /*Not supported last point for range*/
1722                 if (item->last) {
1723                         rte_flow_error_set(error, EINVAL,
1724                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1725                                 item, "Not supported last point for range");
1726                         return -rte_errno;
1727                 }
1728                 /**
1729                  * Only care about src & dst ports,
1730                  * others should be masked.
1731                  */
1732                 if (!item->mask) {
1733                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1734                         rte_flow_error_set(error, EINVAL,
1735                                 RTE_FLOW_ERROR_TYPE_ITEM,
1736                                 item, "Not supported by fdir filter");
1737                         return -rte_errno;
1738                 }
1739                 rule->b_mask = TRUE;
1740                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1741                 if (tcp_mask->hdr.sent_seq ||
1742                     tcp_mask->hdr.recv_ack ||
1743                     tcp_mask->hdr.data_off ||
1744                     tcp_mask->hdr.tcp_flags ||
1745                     tcp_mask->hdr.rx_win ||
1746                     tcp_mask->hdr.cksum ||
1747                     tcp_mask->hdr.tcp_urp) {
1748                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1749                         rte_flow_error_set(error, EINVAL,
1750                                 RTE_FLOW_ERROR_TYPE_ITEM,
1751                                 item, "Not supported by fdir filter");
1752                         return -rte_errno;
1753                 }
1754                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1755                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1756
1757                 if (item->spec) {
1758                         rule->b_spec = TRUE;
1759                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1760                         rule->ixgbe_fdir.formatted.src_port =
1761                                 tcp_spec->hdr.src_port;
1762                         rule->ixgbe_fdir.formatted.dst_port =
1763                                 tcp_spec->hdr.dst_port;
1764                 }
1765         }
1766
1767         /* Get the UDP info */
1768         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1769                 /**
1770                  * Set the flow type even if there's no content
1771                  * as we must have a flow type.
1772                  */
1773                 rule->ixgbe_fdir.formatted.flow_type =
1774                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1775                 /*Not supported last point for range*/
1776                 if (item->last) {
1777                         rte_flow_error_set(error, EINVAL,
1778                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1779                                 item, "Not supported last point for range");
1780                         return -rte_errno;
1781                 }
1782                 /**
1783                  * Only care about src & dst ports,
1784                  * others should be masked.
1785                  */
1786                 if (!item->mask) {
1787                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1788                         rte_flow_error_set(error, EINVAL,
1789                                 RTE_FLOW_ERROR_TYPE_ITEM,
1790                                 item, "Not supported by fdir filter");
1791                         return -rte_errno;
1792                 }
1793                 rule->b_mask = TRUE;
1794                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1795                 if (udp_mask->hdr.dgram_len ||
1796                     udp_mask->hdr.dgram_cksum) {
1797                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1798                         rte_flow_error_set(error, EINVAL,
1799                                 RTE_FLOW_ERROR_TYPE_ITEM,
1800                                 item, "Not supported by fdir filter");
1801                         return -rte_errno;
1802                 }
1803                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1804                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1805
1806                 if (item->spec) {
1807                         rule->b_spec = TRUE;
1808                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1809                         rule->ixgbe_fdir.formatted.src_port =
1810                                 udp_spec->hdr.src_port;
1811                         rule->ixgbe_fdir.formatted.dst_port =
1812                                 udp_spec->hdr.dst_port;
1813                 }
1814         }
1815
1816         /* Get the SCTP info */
1817         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1818                 /**
1819                  * Set the flow type even if there's no content
1820                  * as we must have a flow type.
1821                  */
1822                 rule->ixgbe_fdir.formatted.flow_type =
1823                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1824                 /*Not supported last point for range*/
1825                 if (item->last) {
1826                         rte_flow_error_set(error, EINVAL,
1827                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1828                                 item, "Not supported last point for range");
1829                         return -rte_errno;
1830                 }
1831                 /**
1832                  * Only care about src & dst ports,
1833                  * others should be masked.
1834                  */
1835                 if (!item->mask) {
1836                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1837                         rte_flow_error_set(error, EINVAL,
1838                                 RTE_FLOW_ERROR_TYPE_ITEM,
1839                                 item, "Not supported by fdir filter");
1840                         return -rte_errno;
1841                 }
1842                 rule->b_mask = TRUE;
1843                 sctp_mask =
1844                         (const struct rte_flow_item_sctp *)item->mask;
1845                 if (sctp_mask->hdr.tag ||
1846                     sctp_mask->hdr.cksum) {
1847                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1848                         rte_flow_error_set(error, EINVAL,
1849                                 RTE_FLOW_ERROR_TYPE_ITEM,
1850                                 item, "Not supported by fdir filter");
1851                         return -rte_errno;
1852                 }
1853                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1854                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1855
1856                 if (item->spec) {
1857                         rule->b_spec = TRUE;
1858                         sctp_spec =
1859                                 (const struct rte_flow_item_sctp *)item->spec;
1860                         rule->ixgbe_fdir.formatted.src_port =
1861                                 sctp_spec->hdr.src_port;
1862                         rule->ixgbe_fdir.formatted.dst_port =
1863                                 sctp_spec->hdr.dst_port;
1864                 }
1865         }
1866
1867         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1868                 /* check if the next not void item is END */
1869                 index++;
1870                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1871                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1872                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1873                         rte_flow_error_set(error, EINVAL,
1874                                 RTE_FLOW_ERROR_TYPE_ITEM,
1875                                 item, "Not supported by fdir filter");
1876                         return -rte_errno;
1877                 }
1878         }
1879
1880         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1881 }
1882
1883 #define NVGRE_PROTOCOL 0x6558
1884
1885 /**
1886  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
1887  * and fill in the flow director filter info along the way.
1888  * VxLAN PATTERN:
1889  * The first not void item must be ETH.
1890  * The second not void item must be IPV4/ IPV6.
1891  * The third not void item must be UDP, the fourth must be VxLAN.
1892  * The next not void item must be END.
1893  * NVGRE PATTERN:
1894  * The first not void item must be ETH.
1895  * The second not void item must be IPV4/ IPV6.
1896  * The third not void item must be NVGRE.
1897  * The next not void item must be END.
1898  * ACTION:
1899  * The first not void action should be QUEUE or DROP.
1900  * The second not void optional action should be MARK,
1901  * mark_id is a uint32_t number.
1902  * The next not void action should be END.
1903  * VxLAN pattern example:
1904  * ITEM         Spec                    Mask
1905  * ETH          NULL                    NULL
1906  * IPV4/IPV6    NULL                    NULL
1907  * UDP          NULL                    NULL
1908  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1909  * END
1910  * NVGRE pattern example:
1911  * ITEM         Spec                    Mask
1912  * ETH          NULL                    NULL
1913  * IPV4/IPV6    NULL                    NULL
1914  * NVGRE        protocol        0x6558  0xFFFF
1915  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1916  * END
1917  * Other members in mask and spec should be set to 0x00.
1918  * item->last should be NULL.
1919  */
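/*
 * Illustrative sketch, not part of the driver: the VxLAN example from the
 * table above expressed as rte_flow items (VNI value taken from the table).
 * Note that ixgbe_parse_fdir_filter_tunnel() below additionally expects an
 * inner ETH item (and VLAN) describing the inner MAC/VLAN to match before
 * the END item.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */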
1920 static int
1921 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1922                                const struct rte_flow_item pattern[],
1923                                const struct rte_flow_action actions[],
1924                                struct ixgbe_fdir_rule *rule,
1925                                struct rte_flow_error *error)
1926 {
1927         const struct rte_flow_item *item;
1928         const struct rte_flow_item_vxlan *vxlan_spec;
1929         const struct rte_flow_item_vxlan *vxlan_mask;
1930         const struct rte_flow_item_nvgre *nvgre_spec;
1931         const struct rte_flow_item_nvgre *nvgre_mask;
1932         const struct rte_flow_item_eth *eth_spec;
1933         const struct rte_flow_item_eth *eth_mask;
1934         const struct rte_flow_item_vlan *vlan_spec;
1935         const struct rte_flow_item_vlan *vlan_mask;
1936         uint32_t index, j;
1937
1938         if (!pattern) {
1939                 rte_flow_error_set(error, EINVAL,
1940                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1941                                    NULL, "NULL pattern.");
1942                 return -rte_errno;
1943         }
1944
1945         if (!actions) {
1946                 rte_flow_error_set(error, EINVAL,
1947                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1948                                    NULL, "NULL action.");
1949                 return -rte_errno;
1950         }
1951
1952         if (!attr) {
1953                 rte_flow_error_set(error, EINVAL,
1954                                    RTE_FLOW_ERROR_TYPE_ATTR,
1955                                    NULL, "NULL attribute.");
1956                 return -rte_errno;
1957         }
1958
1959         /**
1960          * Some fields may not be provided. Set spec to 0 and mask to default
1961          * value. So, we need not do anything for the not provided fields later.
1962          */
1963         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1964         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1965         rule->mask.vlan_tci_mask = 0;
1966
1967         /* parse pattern */
1968         index = 0;
1969
1970         /**
1971          * The first not void item should be
1972          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
1973          */
1974         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1975         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1976             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1977             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1978             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1979             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1980             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1981                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1982                 rte_flow_error_set(error, EINVAL,
1983                         RTE_FLOW_ERROR_TYPE_ITEM,
1984                         item, "Not supported by fdir filter");
1985                 return -rte_errno;
1986         }
1987
1988         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1989
1990         /* Skip MAC. */
1991         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1992                 /* Only used to describe the protocol stack. */
1993                 if (item->spec || item->mask) {
1994                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1995                         rte_flow_error_set(error, EINVAL,
1996                                 RTE_FLOW_ERROR_TYPE_ITEM,
1997                                 item, "Not supported by fdir filter");
1998                         return -rte_errno;
1999                 }
2000                 /*Not supported last point for range*/
2001                 if (item->last) {
2002                         rte_flow_error_set(error, EINVAL,
2003                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2004                                 item, "Not supported last point for range");
2005                         return -rte_errno;
2006                 }
2007
2008                 /* Check if the next not void item is IPv4 or IPv6. */
2009                 index++;
2010                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2011                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2012                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2013                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2014                         rte_flow_error_set(error, EINVAL,
2015                                 RTE_FLOW_ERROR_TYPE_ITEM,
2016                                 item, "Not supported by fdir filter");
2017                         return -rte_errno;
2018                 }
2019         }
2020
2021         /* Skip IP. */
2022         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2023             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2024                 /* Only used to describe the protocol stack. */
2025                 if (item->spec || item->mask) {
2026                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2027                         rte_flow_error_set(error, EINVAL,
2028                                 RTE_FLOW_ERROR_TYPE_ITEM,
2029                                 item, "Not supported by fdir filter");
2030                         return -rte_errno;
2031                 }
2032                 /*Not supported last point for range*/
2033                 if (item->last) {
2034                         rte_flow_error_set(error, EINVAL,
2035                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2036                                 item, "Not supported last point for range");
2037                         return -rte_errno;
2038                 }
2039
2040                 /* Check if the next not void item is UDP or NVGRE. */
2041                 index++;
2042                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2043                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2044                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2045                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2046                         rte_flow_error_set(error, EINVAL,
2047                                 RTE_FLOW_ERROR_TYPE_ITEM,
2048                                 item, "Not supported by fdir filter");
2049                         return -rte_errno;
2050                 }
2051         }
2052
2053         /* Skip UDP. */
2054         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2055                 /* Only used to describe the protocol stack. */
2056                 if (item->spec || item->mask) {
2057                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2058                         rte_flow_error_set(error, EINVAL,
2059                                 RTE_FLOW_ERROR_TYPE_ITEM,
2060                                 item, "Not supported by fdir filter");
2061                         return -rte_errno;
2062                 }
2063                 /*Not supported last point for range*/
2064                 if (item->last) {
2065                         rte_flow_error_set(error, EINVAL,
2066                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2067                                 item, "Not supported last point for range");
2068                         return -rte_errno;
2069                 }
2070
2071                 /* Check if the next not void item is VxLAN. */
2072                 index++;
2073                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2074                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2075                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2076                         rte_flow_error_set(error, EINVAL,
2077                                 RTE_FLOW_ERROR_TYPE_ITEM,
2078                                 item, "Not supported by fdir filter");
2079                         return -rte_errno;
2080                 }
2081         }
2082
2083         /* Get the VxLAN info */
2084         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2085                 rule->ixgbe_fdir.formatted.tunnel_type =
2086                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2087
2088                 /* Only care about VNI, others should be masked. */
2089                 if (!item->mask) {
2090                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2091                         rte_flow_error_set(error, EINVAL,
2092                                 RTE_FLOW_ERROR_TYPE_ITEM,
2093                                 item, "Not supported by fdir filter");
2094                         return -rte_errno;
2095                 }
2096                 /*Not supported last point for range*/
2097                 if (item->last) {
2098                         rte_flow_error_set(error, EINVAL,
2099                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2100                                 item, "Not supported last point for range");
2101                         return -rte_errno;
2102                 }
2103                 rule->b_mask = TRUE;
2104
2105                 /* Tunnel type is always meaningful. */
2106                 rule->mask.tunnel_type_mask = 1;
2107
2108                 vxlan_mask =
2109                         (const struct rte_flow_item_vxlan *)item->mask;
2110                 if (vxlan_mask->flags) {
2111                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2112                         rte_flow_error_set(error, EINVAL,
2113                                 RTE_FLOW_ERROR_TYPE_ITEM,
2114                                 item, "Not supported by fdir filter");
2115                         return -rte_errno;
2116                 }
2117                 /* VNI must be totally masked or not. */
2118                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2119                         vxlan_mask->vni[2]) &&
2120                         ((vxlan_mask->vni[0] != 0xFF) ||
2121                         (vxlan_mask->vni[1] != 0xFF) ||
2122                                 (vxlan_mask->vni[2] != 0xFF))) {
2123                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2124                         rte_flow_error_set(error, EINVAL,
2125                                 RTE_FLOW_ERROR_TYPE_ITEM,
2126                                 item, "Not supported by fdir filter");
2127                         return -rte_errno;
2128                 }
2129
2130                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2131                         RTE_DIM(vxlan_mask->vni));
2132                 rule->mask.tunnel_id_mask <<= 8;
2133
2134                 if (item->spec) {
2135                         rule->b_spec = TRUE;
2136                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2137                                         item->spec;
2138                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2139                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2140                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2141                 }
2142         }
2143
2144         /* Get the NVGRE info */
2145         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2146                 rule->ixgbe_fdir.formatted.tunnel_type =
2147                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2148
2149                 /**
2150                  * Only care about flags0, flags1, protocol and TNI,
2151                  * others should be masked.
2152                  */
2153                 if (!item->mask) {
2154                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2155                         rte_flow_error_set(error, EINVAL,
2156                                 RTE_FLOW_ERROR_TYPE_ITEM,
2157                                 item, "Not supported by fdir filter");
2158                         return -rte_errno;
2159                 }
2160                 /*Not supported last point for range*/
2161                 if (item->last) {
2162                         rte_flow_error_set(error, EINVAL,
2163                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2164                                 item, "Not supported last point for range");
2165                         return -rte_errno;
2166                 }
2167                 rule->b_mask = TRUE;
2168
2169                 /* Tunnel type is always meaningful. */
2170                 rule->mask.tunnel_type_mask = 1;
2171
2172                 nvgre_mask =
2173                         (const struct rte_flow_item_nvgre *)item->mask;
2174                 if (nvgre_mask->flow_id) {
2175                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2176                         rte_flow_error_set(error, EINVAL,
2177                                 RTE_FLOW_ERROR_TYPE_ITEM,
2178                                 item, "Not supported by fdir filter");
2179                         return -rte_errno;
2180                 }
2181                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2182                         rte_cpu_to_be_16(0x3000) ||
2183                     nvgre_mask->protocol != 0xFFFF) {
2184                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2185                         rte_flow_error_set(error, EINVAL,
2186                                 RTE_FLOW_ERROR_TYPE_ITEM,
2187                                 item, "Not supported by fdir filter");
2188                         return -rte_errno;
2189                 }
2190                 /* TNI must be totally masked or not. */
2191                 if (nvgre_mask->tni[0] &&
2192                     ((nvgre_mask->tni[0] != 0xFF) ||
2193                     (nvgre_mask->tni[1] != 0xFF) ||
2194                     (nvgre_mask->tni[2] != 0xFF))) {
2195                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2196                         rte_flow_error_set(error, EINVAL,
2197                                 RTE_FLOW_ERROR_TYPE_ITEM,
2198                                 item, "Not supported by fdir filter");
2199                         return -rte_errno;
2200                 }
2201                 /* TNI is a 24-bit field. */
2202                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2203                         RTE_DIM(nvgre_mask->tni));
2204                 rule->mask.tunnel_id_mask <<= 8;
2205
2206                 if (item->spec) {
2207                         rule->b_spec = TRUE;
2208                         nvgre_spec =
2209                                 (const struct rte_flow_item_nvgre *)item->spec;
2210                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2211                             rte_cpu_to_be_16(0x2000) ||
2212                             nvgre_spec->protocol !=
2213                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2214                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2215                                 rte_flow_error_set(error, EINVAL,
2216                                         RTE_FLOW_ERROR_TYPE_ITEM,
2217                                         item, "Not supported by fdir filter");
2218                                 return -rte_errno;
2219                         }
2220                         /* TNI is a 24-bit field. */
2221                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2222                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2223                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2224                 }
2225         }
2226
2227         /* check if the next not void item is MAC */
2228         index++;
2229         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2230         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2231                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2232                 rte_flow_error_set(error, EINVAL,
2233                         RTE_FLOW_ERROR_TYPE_ITEM,
2234                         item, "Not supported by fdir filter");
2235                 return -rte_errno;
2236         }
2237
2238         /**
2239          * Only support vlan and dst MAC address,
2240          * others should be masked.
2241          */
2242
2243         if (!item->mask) {
2244                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2245                 rte_flow_error_set(error, EINVAL,
2246                         RTE_FLOW_ERROR_TYPE_ITEM,
2247                         item, "Not supported by fdir filter");
2248                 return -rte_errno;
2249         }
2250         /*Not supported last point for range*/
2251         if (item->last) {
2252                 rte_flow_error_set(error, EINVAL,
2253                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2254                         item, "Not supported last point for range");
2255                 return -rte_errno;
2256         }
2257         rule->b_mask = TRUE;
2258         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2259
2260         /* Ether type should be masked. */
2261         if (eth_mask->type) {
2262                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2263                 rte_flow_error_set(error, EINVAL,
2264                         RTE_FLOW_ERROR_TYPE_ITEM,
2265                         item, "Not supported by fdir filter");
2266                 return -rte_errno;
2267         }
2268
2269         /* src MAC address should be masked. */
2270         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2271                 if (eth_mask->src.addr_bytes[j]) {
2272                         memset(rule, 0,
2273                                sizeof(struct ixgbe_fdir_rule));
2274                         rte_flow_error_set(error, EINVAL,
2275                                 RTE_FLOW_ERROR_TYPE_ITEM,
2276                                 item, "Not supported by fdir filter");
2277                         return -rte_errno;
2278                 }
2279         }
2280         rule->mask.mac_addr_byte_mask = 0;
2281         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2282                 /* It's a per byte mask. */
2283                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2284                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2285                 } else if (eth_mask->dst.addr_bytes[j]) {
2286                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2287                         rte_flow_error_set(error, EINVAL,
2288                                 RTE_FLOW_ERROR_TYPE_ITEM,
2289                                 item, "Not supported by fdir filter");
2290                         return -rte_errno;
2291                 }
2292         }
2293
2294         /* When no vlan, considered as full mask. */
2295         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2296
2297         if (item->spec) {
2298                 rule->b_spec = TRUE;
2299                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2300
2301                 /* Get the dst MAC. */
2302                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2303                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2304                                 eth_spec->dst.addr_bytes[j];
2305                 }
2306         }
2307
2308         /**
2309          * Check if the next not void item is vlan or ipv4.
2310          * IPv6 is not supported.
2311          */
2312         index++;
2313         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2314         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2315                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2316                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2317                 rte_flow_error_set(error, EINVAL,
2318                         RTE_FLOW_ERROR_TYPE_ITEM,
2319                         item, "Not supported by fdir filter");
2320                 return -rte_errno;
2321         }
2322         /*Not supported last point for range*/
2323         if (item->last) {
2324                 rte_flow_error_set(error, EINVAL,
2325                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2326                         item, "Not supported last point for range");
2327                 return -rte_errno;
2328         }
2329
2330         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2331                 if (!(item->spec && item->mask)) {
2332                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2333                         rte_flow_error_set(error, EINVAL,
2334                                 RTE_FLOW_ERROR_TYPE_ITEM,
2335                                 item, "Not supported by fdir filter");
2336                         return -rte_errno;
2337                 }
2338
2339                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2340                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2341
2342                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2343                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2344                         rte_flow_error_set(error, EINVAL,
2345                                 RTE_FLOW_ERROR_TYPE_ITEM,
2346                                 item, "Not supported by fdir filter");
2347                         return -rte_errno;
2348                 }
2349
2350                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2351
2352                 if (vlan_mask->tpid != (uint16_t)~0U) {
2353                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2354                         rte_flow_error_set(error, EINVAL,
2355                                 RTE_FLOW_ERROR_TYPE_ITEM,
2356                                 item, "Not supported by fdir filter");
2357                         return -rte_errno;
2358                 }
2359                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2360                 /* More than one tag is not supported. */
2361
2362                 /**
2363                  * Check if the next not void item is not vlan.
2364                  */
2365                 index++;
2366                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2367                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2368                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2369                         rte_flow_error_set(error, EINVAL,
2370                                 RTE_FLOW_ERROR_TYPE_ITEM,
2371                                 item, "Not supported by fdir filter");
2372                         return -rte_errno;
2373                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2374                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2375                         rte_flow_error_set(error, EINVAL,
2376                                 RTE_FLOW_ERROR_TYPE_ITEM,
2377                                 item, "Not supported by fdir filter");
2378                         return -rte_errno;
2379                 }
2380                 /* check if the next not void item is END */
2381                 index++;
2382                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2383                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2384                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2385                         rte_flow_error_set(error, EINVAL,
2386                                 RTE_FLOW_ERROR_TYPE_ITEM,
2387                                 item, "Not supported by fdir filter");
2388                         return -rte_errno;
2389                 }
2390         }
2391
2392         /**
2393          * If the tag is 0, it means we don't care about the VLAN.
2394          * Do nothing.
2395          */
2396
2397         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2398 }
2399
2400 static int
2401 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
2402                         const struct rte_flow_attr *attr,
2403                         const struct rte_flow_item pattern[],
2404                         const struct rte_flow_action actions[],
2405                         struct ixgbe_fdir_rule *rule,
2406                         struct rte_flow_error *error)
2407 {
2408         int ret = 0;
2409
2410         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2411
2412         ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
2413                                       rule, error);
2414
2415
2416         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2417             fdir_mode != rule->mode)
2418                 return -ENOTSUP;
2419
2420         return ret;
2421 }
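/*
 * Illustrative sketch, not part of the driver: flow director rules are only
 * accepted when the port was configured with a matching fdir mode, e.g. for
 * perfect-match rules (port_id and the queue counts are the application's):
 *
 *	struct rte_eth_conf port_conf = {
 *		.fdir_conf = {
 *			.mode = RTE_FDIR_MODE_PERFECT,
 *			.pballoc = RTE_FDIR_PBALLOC_64K,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q, &port_conf);
 */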
2422
2423 static int
2424 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
2425                         const struct rte_flow_item pattern[],
2426                         const struct rte_flow_action actions[],
2427                         struct ixgbe_fdir_rule *rule,
2428                         struct rte_flow_error *error)
2429 {
2430         int ret;
2431
2432         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2433                                         actions, rule, error);
2434
2435         if (!ret)
2436                 return 0;
2437
2438         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2439                                         actions, rule, error);
2440
2441         return ret;
2442 }
2443
2444 /**
2445  * Create a flow rule.
2446  * Theoretically one rule can match more than one filter.
2447  * We will let it use the filter which it hits first.
2448  * So, the sequence matters.
2449  */
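/*
 * Illustrative sketch, not part of the driver: an application typically
 * validates and then creates a rule through the generic flow API; attr,
 * pattern and actions are built as in the sketches above, and port_id is
 * the application's.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow rejected: %s\n", err.message ? err.message : "");
 */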
2450 static struct rte_flow *
2451 ixgbe_flow_create(struct rte_eth_dev *dev,
2452                   const struct rte_flow_attr *attr,
2453                   const struct rte_flow_item pattern[],
2454                   const struct rte_flow_action actions[],
2455                   struct rte_flow_error *error)
2456 {
2457         int ret;
2458         struct rte_eth_ntuple_filter ntuple_filter;
2459         struct rte_eth_ethertype_filter ethertype_filter;
2460         struct rte_eth_syn_filter syn_filter;
2461         struct ixgbe_fdir_rule fdir_rule;
2462         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2463         struct ixgbe_hw_fdir_info *fdir_info =
2464                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2465         struct rte_flow *flow = NULL;
2466         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2467         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2468         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2469         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2470         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2471         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2472
2473         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2474         if (!flow) {
2475                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2476                 return (struct rte_flow *)flow;
2477         }
2478         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2479                         sizeof(struct ixgbe_flow_mem), 0);
2480         if (!ixgbe_flow_mem_ptr) {
2481                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2482                 rte_free(flow);
2483                 return NULL;
2484         }
2485         ixgbe_flow_mem_ptr->flow = flow;
2486         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2487                                 ixgbe_flow_mem_ptr, entries);
2488
2489         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2490         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2491                         actions, &ntuple_filter, error);
2492         if (!ret) {
2493                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2494                 if (!ret) {
                        ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
                                sizeof(struct ixgbe_ntuple_filter_ele), 0);
                        if (!ntuple_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                (void)ixgbe_add_del_ntuple_filter(dev,
                                        &ntuple_filter, FALSE);
                                goto out;
                        }
                        (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
                                &ntuple_filter,
                                sizeof(struct rte_eth_ntuple_filter));
2500                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2501                                 ntuple_filter_ptr, entries);
2502                         flow->rule = ntuple_filter_ptr;
2503                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2504                         return flow;
2505                 }
2506                 goto out;
2507         }
2508
2509         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2510         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2511                                 actions, &ethertype_filter, error);
2512         if (!ret) {
2513                 ret = ixgbe_add_del_ethertype_filter(dev,
2514                                 &ethertype_filter, TRUE);
2515                 if (!ret) {
                        ethertype_filter_ptr = rte_zmalloc(
                                "ixgbe_ethertype_filter",
                                sizeof(struct ixgbe_ethertype_filter_ele), 0);
                        if (!ethertype_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                (void)ixgbe_add_del_ethertype_filter(dev,
                                        &ethertype_filter, FALSE);
                                goto out;
                        }
                        (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
                                &ethertype_filter,
                                sizeof(struct rte_eth_ethertype_filter));
2522                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2523                                 ethertype_filter_ptr, entries);
2524                         flow->rule = ethertype_filter_ptr;
2525                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2526                         return flow;
2527                 }
2528                 goto out;
2529         }
2530
2531         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
        ret = ixgbe_parse_syn_filter(attr, pattern,
                                actions, &syn_filter, error);
2533         if (!ret) {
2534                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2535                 if (!ret) {
                        syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
                                sizeof(struct ixgbe_eth_syn_filter_ele), 0);
                        if (!syn_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                (void)ixgbe_syn_filter_set(dev,
                                        &syn_filter, FALSE);
                                goto out;
                        }
                        (void)rte_memcpy(&syn_filter_ptr->filter_info,
                                &syn_filter,
                                sizeof(struct rte_eth_syn_filter));
2541                         TAILQ_INSERT_TAIL(&filter_syn_list,
2542                                 syn_filter_ptr,
2543                                 entries);
2544                         flow->rule = syn_filter_ptr;
2545                         flow->filter_type = RTE_ETH_FILTER_SYN;
2546                         return flow;
2547                 }
2548                 goto out;
2549         }
2550
2551         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
        ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
                                actions, &fdir_rule, error);
2554         if (!ret) {
                /* The flow director mask, once programmed, cannot be deleted. */
2556                 if (fdir_rule.b_mask) {
2557                         if (!fdir_info->mask_added) {
2558                                 /* It's the first time the mask is set. */
2559                                 rte_memcpy(&fdir_info->mask,
2560                                         &fdir_rule.mask,
2561                                         sizeof(struct ixgbe_hw_fdir_mask));
2562                                 ret = ixgbe_fdir_set_input_mask(dev);
2563                                 if (ret)
2564                                         goto out;
2565
2566                                 fdir_info->mask_added = TRUE;
2567                         } else {
                                /**
                                 * Only one global mask is supported,
                                 * so every rule must use the same mask.
                                 */
2572                                 ret = memcmp(&fdir_info->mask,
2573                                         &fdir_rule.mask,
2574                                         sizeof(struct ixgbe_hw_fdir_mask));
2575                                 if (ret)
2576                                         goto out;
2577                         }
2578                 }
2579
2580                 if (fdir_rule.b_spec) {
2581                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2582                                         FALSE, FALSE);
2583                         if (!ret) {
                                fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
                                        sizeof(struct ixgbe_fdir_rule_ele), 0);
                                if (!fdir_rule_ptr) {
                                        PMD_DRV_LOG(ERR, "failed to allocate memory");
                                        (void)ixgbe_fdir_filter_program(dev,
                                                &fdir_rule, TRUE, FALSE);
                                        goto out;
                                }
                                (void)rte_memcpy(&fdir_rule_ptr->filter_info,
                                        &fdir_rule,
                                        sizeof(struct ixgbe_fdir_rule));
2589                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2590                                         fdir_rule_ptr, entries);
2591                                 flow->rule = fdir_rule_ptr;
2592                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2593
2594                                 return flow;
                        }
                }
2600
2601                 goto out;
2602         }
2603
2604         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
                                        actions, &l2_tn_filter, error);
2607         if (!ret) {
2608                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2609                 if (!ret) {
                        l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
                                sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
                        if (!l2_tn_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                (void)ixgbe_dev_l2_tunnel_filter_del(dev,
                                        &l2_tn_filter);
                                goto out;
                        }
                        (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
                                &l2_tn_filter,
                                sizeof(struct rte_eth_l2_tunnel_conf));
2615                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2616                                 l2_tn_filter_ptr, entries);
2617                         flow->rule = l2_tn_filter_ptr;
2618                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2619                         return flow;
2620                 }
2621         }
2622
2623 out:
2624         TAILQ_REMOVE(&ixgbe_flow_list,
2625                 ixgbe_flow_mem_ptr, entries);
2626         rte_free(ixgbe_flow_mem_ptr);
2627         rte_free(flow);
2628         return NULL;
2629 }
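/*
 * Illustrative usage sketch (not part of the driver): how an application
 * could exercise the parse order above through the generic rte_flow API.
 * A 5-tuple pattern with a QUEUE action is accepted by the ntuple parser
 * first, so it is programmed as an ntuple filter rather than a flow
 * director rule.  The function name, addresses, queue index and the
 * uint8_t port id type are assumptions made for this example only.
 */
static __rte_unused int
example_create_ntuple_flow(uint8_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Match TCP packets to 192.168.0.1, destination port 80. */
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001),
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
        };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;
        struct rte_flow *flow;

        /* Validation only checks the format; creation can still fail. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
                return -1;

        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);

        return flow == NULL ? -1 : 0;
}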
2630
2631 /**
2632  * Check if the flow rule is supported by ixgbe.
 * It only checks the format and does not guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
2635  */
2636 static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
2638                 const struct rte_flow_attr *attr,
2639                 const struct rte_flow_item pattern[],
2640                 const struct rte_flow_action actions[],
2641                 struct rte_flow_error *error)
2642 {
2643         struct rte_eth_ntuple_filter ntuple_filter;
2644         struct rte_eth_ethertype_filter ethertype_filter;
2645         struct rte_eth_syn_filter syn_filter;
2646         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2647         struct ixgbe_fdir_rule fdir_rule;
2648         int ret;
2649
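        /*
         * Reuse the same parsers as the create path and stop at the first
         * one that accepts the rule; no hardware state is modified here.
         */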
2650         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2651         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2652                                 actions, &ntuple_filter, error);
2653         if (!ret)
2654                 return 0;
2655
2656         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2657         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2658                                 actions, &ethertype_filter, error);
2659         if (!ret)
2660                 return 0;
2661
2662         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2663         ret = ixgbe_parse_syn_filter(attr, pattern,
2664                                 actions, &syn_filter, error);
2665         if (!ret)
2666                 return 0;
2667
2668         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2669         ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
2670                                 actions, &fdir_rule, error);
2671         if (!ret)
2672                 return 0;
2673
2674         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2675         ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
2676                                 actions, &l2_tn_filter, error);
2677
2678         return ret;
2679 }
2680
2681 /*  Destroy all flow rules associated with a port on ixgbe. */
2682 static int
2683 ixgbe_flow_flush(struct rte_eth_dev *dev,
2684                 struct rte_flow_error *error)
2685 {
2686         int ret = 0;
2687
2688         ixgbe_clear_all_ntuple_filter(dev);
2689         ixgbe_clear_all_ethertype_filter(dev);
2690         ixgbe_clear_syn_filter(dev);
2691
2692         ret = ixgbe_clear_all_fdir_filter(dev);
2693         if (ret < 0) {
2694                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                        NULL, "Failed to flush FDIR rules.");
2696                 return ret;
2697         }
2698
2699         ret = ixgbe_clear_all_l2_tn_filter(dev);
2700         if (ret < 0) {
2701                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                        NULL, "Failed to flush L2 tunnel rules.");
2703                 return ret;
2704         }
2705
2706         return 0;
2707 }
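
/*
 * Illustrative usage sketch (not part of the driver): dropping every rule on
 * a port through the generic API.  rte_flow_flush() lands in
 * ixgbe_flow_flush() above, and on failure the rte_flow_error filled in by
 * rte_flow_error_set() indicates which stage failed.  The function name and
 * the uint8_t port id type are assumptions made for this example only.
 */
static __rte_unused void
example_flush_flows(uint8_t port_id)
{
        struct rte_flow_error err;

        memset(&err, 0, sizeof(err));
        if (rte_flow_flush(port_id, &err))
                printf("flow flush failed: %s\n",
                       err.message ? err.message : "(no error message)");
}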