[dpdk.git] drivers/net/ixgbe/ixgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
31 #include <rte_dev.h>
32 #include <rte_hash_crc.h>
33 #include <rte_flow.h>
34 #include <rte_flow_driver.h>
35
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct rte_eth_l2_tunnel_conf filter_info;
76 };
77 /* ixgbe_flow memory list structure */
78 struct ixgbe_flow_mem {
79         TAILQ_ENTRY(ixgbe_flow_mem) entries;
80         struct rte_flow *flow;
81 };
82
83 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
84 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
85 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
86 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
87 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
88 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
89
90 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
91 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
92 static struct ixgbe_syn_filter_list filter_syn_list;
93 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
94 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
95 static struct ixgbe_flow_mem_list ixgbe_flow_list;
96
97 /**
98  * An endless loop cannot happen given the assumptions below:
99  * 1. there is at least one non-void item (END),
100  * 2. cur is before END.
101  */
102 static inline
103 const struct rte_flow_item *next_no_void_pattern(
104                 const struct rte_flow_item pattern[],
105                 const struct rte_flow_item *cur)
106 {
107         const struct rte_flow_item *next =
108                 cur ? cur + 1 : &pattern[0];
109         while (1) {
110                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
111                         return next;
112                 next++;
113         }
114 }
115
116 static inline
117 const struct rte_flow_action *next_no_void_action(
118                 const struct rte_flow_action actions[],
119                 const struct rte_flow_action *cur)
120 {
121         const struct rte_flow_action *next =
122                 cur ? cur + 1 : &actions[0];
123         while (1) {
124                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
125                         return next;
126                 next++;
127         }
128 }
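/*
 * Illustrative note (not part of the driver): VOID entries may be used to
 * pad a pattern or action array and are simply skipped by the helpers
 * above, e.g. the pattern below is walked as ETH -> IPV4 -> END:
 *
 *   pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
 *   pattern[1].type = RTE_FLOW_ITEM_TYPE_VOID;
 *   pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
 *   pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
 */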
129
130 /**
131  * Please be aware there is an assumption for all the parsers:
132  * rte_flow_item uses big endian, while rte_flow_attr and
133  * rte_flow_action use CPU byte order.
134  * Because the pattern is used to describe packets,
135  * the packets normally use network byte order.
136  */
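/*
 * Byte-order example (illustrative, values are assumptions): a spec that
 * matches TCP destination port 80 carries the port in network order,
 * while the attr fields and the queue index in the action stay in CPU
 * order:
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr.dst_port = rte_cpu_to_be_16(80),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 */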
137
138 /**
139  * Parse the rule to see if it is an n-tuple rule,
140  * and fill in the n-tuple filter info along the way.
141  * pattern:
142  * The first not void item can be ETH or IPV4.
143  * The second not void item must be IPV4 if the first one is ETH.
144  * The third not void item must be UDP or TCP.
145  * The next not void item must be END.
146  * action:
147  * The first not void action should be QUEUE.
148  * The next not void action should be END.
149  * pattern example:
150  * ITEM         Spec                    Mask
151  * ETH          NULL                    NULL
152  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
153  *              dst_addr 192.167.3.50   0xFFFFFFFF
154  *              next_proto_id   17      0xFF
155  * UDP/TCP/     src_port        80      0xFFFF
156  * SCTP         dst_port        80      0xFFFF
157  * END
158  * other members in mask and spec should be set to 0x00.
159  * item->last should be NULL.
160  *
161  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
162  *
163  */
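/*
 * Illustrative example (not part of the driver): the rule from the table
 * above, matching UDP 192.168.1.20:80 -> 192.167.3.50:80, could be built
 * by an application roughly as below and passed to rte_flow_create();
 * the addresses, ports and queue index are assumptions for the example.
 *
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *           .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *           .next_proto_id = IPPROTO_UDP,
 *   } };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *           .src_addr = UINT32_MAX,
 *           .dst_addr = UINT32_MAX,
 *           .next_proto_id = 0xFF,
 *   } };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(80),
 *           .dst_port = rte_cpu_to_be_16(80),
 *   } };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *           .src_port = UINT16_MAX,
 *           .dst_port = UINT16_MAX,
 *   } };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */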
164 static int
165 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
166                          const struct rte_flow_item pattern[],
167                          const struct rte_flow_action actions[],
168                          struct rte_eth_ntuple_filter *filter,
169                          struct rte_flow_error *error)
170 {
171         const struct rte_flow_item *item;
172         const struct rte_flow_action *act;
173         const struct rte_flow_item_ipv4 *ipv4_spec;
174         const struct rte_flow_item_ipv4 *ipv4_mask;
175         const struct rte_flow_item_tcp *tcp_spec;
176         const struct rte_flow_item_tcp *tcp_mask;
177         const struct rte_flow_item_udp *udp_spec;
178         const struct rte_flow_item_udp *udp_mask;
179         const struct rte_flow_item_sctp *sctp_spec;
180         const struct rte_flow_item_sctp *sctp_mask;
181
182         if (!pattern) {
183                 rte_flow_error_set(error,
184                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
185                         NULL, "NULL pattern.");
186                 return -rte_errno;
187         }
188
189         if (!actions) {
190                 rte_flow_error_set(error, EINVAL,
191                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
192                                    NULL, "NULL action.");
193                 return -rte_errno;
194         }
195         if (!attr) {
196                 rte_flow_error_set(error, EINVAL,
197                                    RTE_FLOW_ERROR_TYPE_ATTR,
198                                    NULL, "NULL attribute.");
199                 return -rte_errno;
200         }
201
202 #ifdef RTE_LIBRTE_SECURITY
203         /**
204          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
205          */
206         act = next_no_void_action(actions, NULL);
207         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
208                 const void *conf = act->conf;
209                 /* check if the next not void item is END */
210                 act = next_no_void_action(actions, act);
211                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
212                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
213                         rte_flow_error_set(error, EINVAL,
214                                 RTE_FLOW_ERROR_TYPE_ACTION,
215                                 act, "Not supported action.");
216                         return -rte_errno;
217                 }
218
219                 /* get the IP pattern */
220                 item = next_no_void_pattern(pattern, NULL);
221                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
222                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
223                         if (item->last ||
224                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
225                                 rte_flow_error_set(error, EINVAL,
226                                         RTE_FLOW_ERROR_TYPE_ITEM,
227                                         item, "IP pattern missing.");
228                                 return -rte_errno;
229                         }
230                         item = next_no_void_pattern(pattern, item);
231                 }
232
233                 filter->proto = IPPROTO_ESP;
234                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
235                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
236         }
237 #endif
238
239         /* the first not void item can be MAC or IPv4 */
240         item = next_no_void_pattern(pattern, NULL);
241
242         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
243             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
244                 rte_flow_error_set(error, EINVAL,
245                         RTE_FLOW_ERROR_TYPE_ITEM,
246                         item, "Not supported by ntuple filter");
247                 return -rte_errno;
248         }
249         /* Skip Ethernet */
250         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
251                 /*Not supported last point for range*/
252                 if (item->last) {
253                         rte_flow_error_set(error,
254                           EINVAL,
255                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
256                           item, "Not supported last point for range");
257                         return -rte_errno;
258
259                 }
260                 /* if the first item is MAC, the content should be NULL */
261                 if (item->spec || item->mask) {
262                         rte_flow_error_set(error, EINVAL,
263                                 RTE_FLOW_ERROR_TYPE_ITEM,
264                                 item, "Not supported by ntuple filter");
265                         return -rte_errno;
266                 }
267                 /* check if the next not void item is IPv4 */
268                 item = next_no_void_pattern(pattern, item);
269                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
270                         rte_flow_error_set(error,
271                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
272                           item, "Not supported by ntuple filter");
273                           return -rte_errno;
274                 }
275         }
276
277         /* get the IPv4 info */
278         if (!item->spec || !item->mask) {
279                 rte_flow_error_set(error, EINVAL,
280                         RTE_FLOW_ERROR_TYPE_ITEM,
281                         item, "Invalid ntuple mask");
282                 return -rte_errno;
283         }
284         /*Not supported last point for range*/
285         if (item->last) {
286                 rte_flow_error_set(error, EINVAL,
287                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
288                         item, "Not supported last point for range");
289                 return -rte_errno;
290
291         }
292
293         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
294         /**
295          * Only support src & dst addresses, protocol,
296          * others should be masked.
297          */
298         if (ipv4_mask->hdr.version_ihl ||
299             ipv4_mask->hdr.type_of_service ||
300             ipv4_mask->hdr.total_length ||
301             ipv4_mask->hdr.packet_id ||
302             ipv4_mask->hdr.fragment_offset ||
303             ipv4_mask->hdr.time_to_live ||
304             ipv4_mask->hdr.hdr_checksum) {
305                 rte_flow_error_set(error, EINVAL,
306                         RTE_FLOW_ERROR_TYPE_ITEM,
307                         item, "Not supported by ntuple filter");
308                 return -rte_errno;
309         }
310
311         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
312         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
313         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
314
315         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
316         filter->dst_ip = ipv4_spec->hdr.dst_addr;
317         filter->src_ip = ipv4_spec->hdr.src_addr;
318         filter->proto  = ipv4_spec->hdr.next_proto_id;
319
320         /* check if the next not void item is TCP, UDP, SCTP or END */
321         item = next_no_void_pattern(pattern, item);
322         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
323             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
324             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
325             item->type != RTE_FLOW_ITEM_TYPE_END) {
326                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
327                 rte_flow_error_set(error, EINVAL,
328                         RTE_FLOW_ERROR_TYPE_ITEM,
329                         item, "Not supported by ntuple filter");
330                 return -rte_errno;
331         }
332
333         /* get the TCP/UDP/SCTP info */
334         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
335                 (!item->spec || !item->mask)) {
336                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
337                 rte_flow_error_set(error, EINVAL,
338                         RTE_FLOW_ERROR_TYPE_ITEM,
339                         item, "Invalid ntuple mask");
340                 return -rte_errno;
341         }
342
343         /*Not supported last point for range*/
344         if (item->last) {
345                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
346                 rte_flow_error_set(error, EINVAL,
347                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
348                         item, "Not supported last point for range");
349                 return -rte_errno;
350
351         }
352
353         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
354                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
355
356                 /**
357                  * Only support src & dst ports, tcp flags,
358                  * others should be masked.
359                  */
360                 if (tcp_mask->hdr.sent_seq ||
361                     tcp_mask->hdr.recv_ack ||
362                     tcp_mask->hdr.data_off ||
363                     tcp_mask->hdr.rx_win ||
364                     tcp_mask->hdr.cksum ||
365                     tcp_mask->hdr.tcp_urp) {
366                         memset(filter, 0,
367                                 sizeof(struct rte_eth_ntuple_filter));
368                         rte_flow_error_set(error, EINVAL,
369                                 RTE_FLOW_ERROR_TYPE_ITEM,
370                                 item, "Not supported by ntuple filter");
371                         return -rte_errno;
372                 }
373
374                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
375                 filter->src_port_mask  = tcp_mask->hdr.src_port;
376                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
377                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
378                 } else if (!tcp_mask->hdr.tcp_flags) {
379                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
380                 } else {
381                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
382                         rte_flow_error_set(error, EINVAL,
383                                 RTE_FLOW_ERROR_TYPE_ITEM,
384                                 item, "Not supported by ntuple filter");
385                         return -rte_errno;
386                 }
387
388                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
389                 filter->dst_port  = tcp_spec->hdr.dst_port;
390                 filter->src_port  = tcp_spec->hdr.src_port;
391                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
392         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
393                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
394
395                 /**
396                  * Only support src & dst ports,
397                  * others should be masked.
398                  */
399                 if (udp_mask->hdr.dgram_len ||
400                     udp_mask->hdr.dgram_cksum) {
401                         memset(filter, 0,
402                                 sizeof(struct rte_eth_ntuple_filter));
403                         rte_flow_error_set(error, EINVAL,
404                                 RTE_FLOW_ERROR_TYPE_ITEM,
405                                 item, "Not supported by ntuple filter");
406                         return -rte_errno;
407                 }
408
409                 filter->dst_port_mask = udp_mask->hdr.dst_port;
410                 filter->src_port_mask = udp_mask->hdr.src_port;
411
412                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
413                 filter->dst_port = udp_spec->hdr.dst_port;
414                 filter->src_port = udp_spec->hdr.src_port;
415         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
416                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
417
418                 /**
419                  * Only support src & dst ports,
420                  * others should be masked.
421                  */
422                 if (sctp_mask->hdr.tag ||
423                     sctp_mask->hdr.cksum) {
424                         memset(filter, 0,
425                                 sizeof(struct rte_eth_ntuple_filter));
426                         rte_flow_error_set(error, EINVAL,
427                                 RTE_FLOW_ERROR_TYPE_ITEM,
428                                 item, "Not supported by ntuple filter");
429                         return -rte_errno;
430                 }
431
432                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
433                 filter->src_port_mask = sctp_mask->hdr.src_port;
434
435                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
436                 filter->dst_port = sctp_spec->hdr.dst_port;
437                 filter->src_port = sctp_spec->hdr.src_port;
438         } else {
439                 goto action;
440         }
441
442         /* check if the next not void item is END */
443         item = next_no_void_pattern(pattern, item);
444         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
445                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
446                 rte_flow_error_set(error, EINVAL,
447                         RTE_FLOW_ERROR_TYPE_ITEM,
448                         item, "Not supported by ntuple filter");
449                 return -rte_errno;
450         }
451
452 action:
453
454         /**
455          * n-tuple only supports forwarding,
456          * check if the first not void action is QUEUE.
457          */
458         act = next_no_void_action(actions, NULL);
459         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
460                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
461                 rte_flow_error_set(error, EINVAL,
462                         RTE_FLOW_ERROR_TYPE_ACTION,
463                         act, "Not supported action.");
464                 return -rte_errno;
465         }
466         filter->queue =
467                 ((const struct rte_flow_action_queue *)act->conf)->index;
468
469         /* check if the next not void action is END */
470         act = next_no_void_action(actions, act);
471         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
472                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
473                 rte_flow_error_set(error, EINVAL,
474                         RTE_FLOW_ERROR_TYPE_ACTION,
475                         act, "Not supported action.");
476                 return -rte_errno;
477         }
478
479         /* parse attr */
480         /* must be input direction */
481         if (!attr->ingress) {
482                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
483                 rte_flow_error_set(error, EINVAL,
484                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
485                                    attr, "Only support ingress.");
486                 return -rte_errno;
487         }
488
489         /* not supported */
490         if (attr->egress) {
491                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
492                 rte_flow_error_set(error, EINVAL,
493                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
494                                    attr, "Not support egress.");
495                 return -rte_errno;
496         }
497
498         if (attr->priority > 0xFFFF) {
499                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
500                 rte_flow_error_set(error, EINVAL,
501                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
502                                    attr, "Error priority.");
503                 return -rte_errno;
504         }
505         filter->priority = (uint16_t)attr->priority;
506         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
507             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
508                 filter->priority = 1;
509
510         return 0;
511 }
512
513 /* a specific function for ixgbe because the flags are specific */
514 static int
515 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
516                           const struct rte_flow_attr *attr,
517                           const struct rte_flow_item pattern[],
518                           const struct rte_flow_action actions[],
519                           struct rte_eth_ntuple_filter *filter,
520                           struct rte_flow_error *error)
521 {
522         int ret;
523         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
524
525         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
526
527         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
528
529         if (ret)
530                 return ret;
531
532 #ifdef RTE_LIBRTE_SECURITY
533         /* An ESP flow is not really a flow */
534         if (filter->proto == IPPROTO_ESP)
535                 return 0;
536 #endif
537
538         /* Ixgbe doesn't support tcp flags. */
539         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
540                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
541                 rte_flow_error_set(error, EINVAL,
542                                    RTE_FLOW_ERROR_TYPE_ITEM,
543                                    NULL, "Not supported by ntuple filter");
544                 return -rte_errno;
545         }
546
547         /* Ixgbe only supports priorities 1 to 7. */
548         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
549             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
550                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
551                 rte_flow_error_set(error, EINVAL,
552                         RTE_FLOW_ERROR_TYPE_ITEM,
553                         NULL, "Priority not supported by ntuple filter");
554                 return -rte_errno;
555         }
556
557         if (filter->queue >= dev->data->nb_rx_queues)
558                 return -rte_errno;
559
560         /* fixed value for ixgbe */
561         filter->flags = RTE_5TUPLE_FLAGS;
562         return 0;
563 }
564
565 /**
566  * Parse the rule to see if it is an ethertype rule,
567  * and fill in the ethertype filter info along the way.
568  * pattern:
569  * The first not void item can be ETH.
570  * The next not void item must be END.
571  * action:
572  * The first not void action should be QUEUE.
573  * The next not void action should be END.
574  * pattern example:
575  * ITEM         Spec                    Mask
576  * ETH          type    0x0807          0xFFFF
577  * END
578  * other members in mask and spec should be set to 0x00.
579  * item->last should be NULL.
580  */
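/*
 * Illustrative example (not part of the driver): an ethertype rule for
 * type 0x0807 steered to an Rx queue; the queue index is an assumption
 * for the example.
 *
 *   struct rte_flow_item_eth eth_spec = { .type = rte_cpu_to_be_16(0x0807) };
 *   struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */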
581 static int
582 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
583                             const struct rte_flow_item *pattern,
584                             const struct rte_flow_action *actions,
585                             struct rte_eth_ethertype_filter *filter,
586                             struct rte_flow_error *error)
587 {
588         const struct rte_flow_item *item;
589         const struct rte_flow_action *act;
590         const struct rte_flow_item_eth *eth_spec;
591         const struct rte_flow_item_eth *eth_mask;
592         const struct rte_flow_action_queue *act_q;
593
594         if (!pattern) {
595                 rte_flow_error_set(error, EINVAL,
596                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
597                                 NULL, "NULL pattern.");
598                 return -rte_errno;
599         }
600
601         if (!actions) {
602                 rte_flow_error_set(error, EINVAL,
603                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
604                                 NULL, "NULL action.");
605                 return -rte_errno;
606         }
607
608         if (!attr) {
609                 rte_flow_error_set(error, EINVAL,
610                                    RTE_FLOW_ERROR_TYPE_ATTR,
611                                    NULL, "NULL attribute.");
612                 return -rte_errno;
613         }
614
615         item = next_no_void_pattern(pattern, NULL);
616         /* The first non-void item should be MAC. */
617         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
618                 rte_flow_error_set(error, EINVAL,
619                         RTE_FLOW_ERROR_TYPE_ITEM,
620                         item, "Not supported by ethertype filter");
621                 return -rte_errno;
622         }
623
624         /*Not supported last point for range*/
625         if (item->last) {
626                 rte_flow_error_set(error, EINVAL,
627                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
628                         item, "Not supported last point for range");
629                 return -rte_errno;
630         }
631
632         /* Get the MAC info. */
633         if (!item->spec || !item->mask) {
634                 rte_flow_error_set(error, EINVAL,
635                                 RTE_FLOW_ERROR_TYPE_ITEM,
636                                 item, "Not supported by ethertype filter");
637                 return -rte_errno;
638         }
639
640         eth_spec = (const struct rte_flow_item_eth *)item->spec;
641         eth_mask = (const struct rte_flow_item_eth *)item->mask;
642
643         /* Mask bits of source MAC address must be full of 0.
644          * Mask bits of destination MAC address must be full
645          * of 1 or full of 0.
646          */
647         if (!is_zero_ether_addr(&eth_mask->src) ||
648             (!is_zero_ether_addr(&eth_mask->dst) &&
649              !is_broadcast_ether_addr(&eth_mask->dst))) {
650                 rte_flow_error_set(error, EINVAL,
651                                 RTE_FLOW_ERROR_TYPE_ITEM,
652                                 item, "Invalid ether address mask");
653                 return -rte_errno;
654         }
655
656         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
657                 rte_flow_error_set(error, EINVAL,
658                                 RTE_FLOW_ERROR_TYPE_ITEM,
659                                 item, "Invalid ethertype mask");
660                 return -rte_errno;
661         }
662
663         /* If mask bits of destination MAC address
664          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
665          */
666         if (is_broadcast_ether_addr(&eth_mask->dst)) {
667                 filter->mac_addr = eth_spec->dst;
668                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
669         } else {
670                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
671         }
672         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
673
674         /* Check if the next non-void item is END. */
675         item = next_no_void_pattern(pattern, item);
676         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
677                 rte_flow_error_set(error, EINVAL,
678                                 RTE_FLOW_ERROR_TYPE_ITEM,
679                                 item, "Not supported by ethertype filter.");
680                 return -rte_errno;
681         }
682
683         /* Parse action */
684
685         act = next_no_void_action(actions, NULL);
686         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
687             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
688                 rte_flow_error_set(error, EINVAL,
689                                 RTE_FLOW_ERROR_TYPE_ACTION,
690                                 act, "Not supported action.");
691                 return -rte_errno;
692         }
693
694         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
695                 act_q = (const struct rte_flow_action_queue *)act->conf;
696                 filter->queue = act_q->index;
697         } else {
698                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
699         }
700
701         /* Check if the next non-void action is END */
702         act = next_no_void_action(actions, act);
703         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
704                 rte_flow_error_set(error, EINVAL,
705                                 RTE_FLOW_ERROR_TYPE_ACTION,
706                                 act, "Not supported action.");
707                 return -rte_errno;
708         }
709
710         /* Parse attr */
711         /* Must be input direction */
712         if (!attr->ingress) {
713                 rte_flow_error_set(error, EINVAL,
714                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
715                                 attr, "Only support ingress.");
716                 return -rte_errno;
717         }
718
719         /* Not supported */
720         if (attr->egress) {
721                 rte_flow_error_set(error, EINVAL,
722                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
723                                 attr, "Not support egress.");
724                 return -rte_errno;
725         }
726
727         /* Not supported */
728         if (attr->priority) {
729                 rte_flow_error_set(error, EINVAL,
730                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
731                                 attr, "Not support priority.");
732                 return -rte_errno;
733         }
734
735         /* Not supported */
736         if (attr->group) {
737                 rte_flow_error_set(error, EINVAL,
738                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
739                                 attr, "Not support group.");
740                 return -rte_errno;
741         }
742
743         return 0;
744 }
745
746 static int
747 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
748                                  const struct rte_flow_attr *attr,
749                              const struct rte_flow_item pattern[],
750                              const struct rte_flow_action actions[],
751                              struct rte_eth_ethertype_filter *filter,
752                              struct rte_flow_error *error)
753 {
754         int ret;
755         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
756
757         MAC_TYPE_FILTER_SUP(hw->mac.type);
758
759         ret = cons_parse_ethertype_filter(attr, pattern,
760                                         actions, filter, error);
761
762         if (ret)
763                 return ret;
764
765         /* Ixgbe doesn't support MAC address matching. */
766         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
767                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
768                 rte_flow_error_set(error, EINVAL,
769                         RTE_FLOW_ERROR_TYPE_ITEM,
770                         NULL, "Not supported by ethertype filter");
771                 return -rte_errno;
772         }
773
774         if (filter->queue >= dev->data->nb_rx_queues) {
775                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
776                 rte_flow_error_set(error, EINVAL,
777                         RTE_FLOW_ERROR_TYPE_ITEM,
778                         NULL, "queue index much too big");
779                 return -rte_errno;
780         }
781
782         if (filter->ether_type == ETHER_TYPE_IPv4 ||
783                 filter->ether_type == ETHER_TYPE_IPv6) {
784                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
785                 rte_flow_error_set(error, EINVAL,
786                         RTE_FLOW_ERROR_TYPE_ITEM,
787                         NULL, "IPv4/IPv6 not supported by ethertype filter");
788                 return -rte_errno;
789         }
790
791         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
792                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
793                 rte_flow_error_set(error, EINVAL,
794                         RTE_FLOW_ERROR_TYPE_ITEM,
795                         NULL, "mac compare is unsupported");
796                 return -rte_errno;
797         }
798
799         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
800                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
801                 rte_flow_error_set(error, EINVAL,
802                         RTE_FLOW_ERROR_TYPE_ITEM,
803                         NULL, "drop option is unsupported");
804                 return -rte_errno;
805         }
806
807         return 0;
808 }
809
810 /**
811  * Parse the rule to see if it is a TCP SYN rule,
812  * and fill in the TCP SYN filter info along the way.
813  * pattern:
814  * The first not void item must be ETH.
815  * The second not void item must be IPV4 or IPV6.
816  * The third not void item must be TCP.
817  * The next not void item must be END.
818  * action:
819  * The first not void action should be QUEUE.
820  * The next not void action should be END.
821  * pattern example:
822  * ITEM         Spec                    Mask
823  * ETH          NULL                    NULL
824  * IPV4/IPV6    NULL                    NULL
825  * TCP          tcp_flags       0x02    0x02
826  * END
827  * other members in mask and spec should be set to 0x00.
828  * item->last should be NULL.
829  */
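/*
 * Illustrative example (not part of the driver): only the SYN bit is
 * matched, so both spec and mask carry just TCP_SYN_FLAG and every other
 * TCP field stays zero; the action list follows the same QUEUE/END
 * sequence as above.
 *
 *   struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = TCP_SYN_FLAG };
 *   struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = TCP_SYN_FLAG };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */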
830 static int
831 cons_parse_syn_filter(const struct rte_flow_attr *attr,
832                                 const struct rte_flow_item pattern[],
833                                 const struct rte_flow_action actions[],
834                                 struct rte_eth_syn_filter *filter,
835                                 struct rte_flow_error *error)
836 {
837         const struct rte_flow_item *item;
838         const struct rte_flow_action *act;
839         const struct rte_flow_item_tcp *tcp_spec;
840         const struct rte_flow_item_tcp *tcp_mask;
841         const struct rte_flow_action_queue *act_q;
842
843         if (!pattern) {
844                 rte_flow_error_set(error, EINVAL,
845                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
846                                 NULL, "NULL pattern.");
847                 return -rte_errno;
848         }
849
850         if (!actions) {
851                 rte_flow_error_set(error, EINVAL,
852                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
853                                 NULL, "NULL action.");
854                 return -rte_errno;
855         }
856
857         if (!attr) {
858                 rte_flow_error_set(error, EINVAL,
859                                    RTE_FLOW_ERROR_TYPE_ATTR,
860                                    NULL, "NULL attribute.");
861                 return -rte_errno;
862         }
863
864
865         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
866         item = next_no_void_pattern(pattern, NULL);
867         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
868             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
869             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
870             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
871                 rte_flow_error_set(error, EINVAL,
872                                 RTE_FLOW_ERROR_TYPE_ITEM,
873                                 item, "Not supported by syn filter");
874                 return -rte_errno;
875         }
876         /* Not supported last point for range */
877         if (item->last) {
878                 rte_flow_error_set(error, EINVAL,
879                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
880                         item, "Not supported last point for range");
881                 return -rte_errno;
882         }
883
884         /* Skip Ethernet */
885         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
886                 /* if the item is MAC, the content should be NULL */
887                 if (item->spec || item->mask) {
888                         rte_flow_error_set(error, EINVAL,
889                                 RTE_FLOW_ERROR_TYPE_ITEM,
890                                 item, "Invalid SYN address mask");
891                         return -rte_errno;
892                 }
893
894                 /* check if the next not void item is IPv4 or IPv6 */
895                 item = next_no_void_pattern(pattern, item);
896                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
897                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
898                         rte_flow_error_set(error, EINVAL,
899                                 RTE_FLOW_ERROR_TYPE_ITEM,
900                                 item, "Not supported by syn filter");
901                         return -rte_errno;
902                 }
903         }
904
905         /* Skip IP */
906         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
907             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
908                 /* if the item is IP, the content should be NULL */
909                 if (item->spec || item->mask) {
910                         rte_flow_error_set(error, EINVAL,
911                                 RTE_FLOW_ERROR_TYPE_ITEM,
912                                 item, "Invalid SYN mask");
913                         return -rte_errno;
914                 }
915
916                 /* check if the next not void item is TCP */
917                 item = next_no_void_pattern(pattern, item);
918                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
919                         rte_flow_error_set(error, EINVAL,
920                                 RTE_FLOW_ERROR_TYPE_ITEM,
921                                 item, "Not supported by syn filter");
922                         return -rte_errno;
923                 }
924         }
925
926         /* Get the TCP info. Only support SYN. */
927         if (!item->spec || !item->mask) {
928                 rte_flow_error_set(error, EINVAL,
929                                 RTE_FLOW_ERROR_TYPE_ITEM,
930                                 item, "Invalid SYN mask");
931                 return -rte_errno;
932         }
933         /*Not supported last point for range*/
934         if (item->last) {
935                 rte_flow_error_set(error, EINVAL,
936                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
937                         item, "Not supported last point for range");
938                 return -rte_errno;
939         }
940
941         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
942         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
943         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
944             tcp_mask->hdr.src_port ||
945             tcp_mask->hdr.dst_port ||
946             tcp_mask->hdr.sent_seq ||
947             tcp_mask->hdr.recv_ack ||
948             tcp_mask->hdr.data_off ||
949             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
950             tcp_mask->hdr.rx_win ||
951             tcp_mask->hdr.cksum ||
952             tcp_mask->hdr.tcp_urp) {
953                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
954                 rte_flow_error_set(error, EINVAL,
955                                 RTE_FLOW_ERROR_TYPE_ITEM,
956                                 item, "Not supported by syn filter");
957                 return -rte_errno;
958         }
959
960         /* check if the next not void item is END */
961         item = next_no_void_pattern(pattern, item);
962         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
963                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
964                 rte_flow_error_set(error, EINVAL,
965                                 RTE_FLOW_ERROR_TYPE_ITEM,
966                                 item, "Not supported by syn filter");
967                 return -rte_errno;
968         }
969
970         /* check if the first not void action is QUEUE. */
971         act = next_no_void_action(actions, NULL);
972         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
973                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
974                 rte_flow_error_set(error, EINVAL,
975                                 RTE_FLOW_ERROR_TYPE_ACTION,
976                                 act, "Not supported action.");
977                 return -rte_errno;
978         }
979
980         act_q = (const struct rte_flow_action_queue *)act->conf;
981         filter->queue = act_q->index;
982         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
983                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
984                 rte_flow_error_set(error, EINVAL,
985                                 RTE_FLOW_ERROR_TYPE_ACTION,
986                                 act, "Not supported action.");
987                 return -rte_errno;
988         }
989
990         /* check if the next not void action is END */
991         act = next_no_void_action(actions, act);
992         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
993                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
994                 rte_flow_error_set(error, EINVAL,
995                                 RTE_FLOW_ERROR_TYPE_ACTION,
996                                 act, "Not supported action.");
997                 return -rte_errno;
998         }
999
1000         /* parse attr */
1001         /* must be input direction */
1002         if (!attr->ingress) {
1003                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1004                 rte_flow_error_set(error, EINVAL,
1005                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1006                         attr, "Only support ingress.");
1007                 return -rte_errno;
1008         }
1009
1010         /* not supported */
1011         if (attr->egress) {
1012                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013                 rte_flow_error_set(error, EINVAL,
1014                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1015                         attr, "Not support egress.");
1016                 return -rte_errno;
1017         }
1018
1019         /* Support 2 priorities, the lowest or highest. */
1020         if (!attr->priority) {
1021                 filter->hig_pri = 0;
1022         } else if (attr->priority == (uint32_t)~0U) {
1023                 filter->hig_pri = 1;
1024         } else {
1025                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1026                 rte_flow_error_set(error, EINVAL,
1027                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1028                         attr, "Not support priority.");
1029                 return -rte_errno;
1030         }
1031
1032         return 0;
1033 }
1034
1035 static int
1036 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1037                                  const struct rte_flow_attr *attr,
1038                              const struct rte_flow_item pattern[],
1039                              const struct rte_flow_action actions[],
1040                              struct rte_eth_syn_filter *filter,
1041                              struct rte_flow_error *error)
1042 {
1043         int ret;
1044         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1045
1046         MAC_TYPE_FILTER_SUP(hw->mac.type);
1047
1048         ret = cons_parse_syn_filter(attr, pattern,
1049                                         actions, filter, error);
1050
1051         if (ret)
1052                 return ret;
1053
1054         if (filter->queue >= dev->data->nb_rx_queues)
1055                 return -rte_errno;
1056
1057         return 0;
1058 }
1059
1060 /**
1061  * Parse the rule to see if it is an L2 tunnel rule,
1062  * and fill in the L2 tunnel filter info along the way.
1063  * Only E-tag is supported for now.
1064  * pattern:
1065  * The first not void item can be E_TAG.
1066  * The next not void item must be END.
1067  * action:
1068  * The first not void action should be VF or PF.
1069  * The next not void action should be END.
1070  * pattern example:
1071  * ITEM         Spec                    Mask
1072  * E_TAG        grp             0x1     0x3
1073  *              e_cid_base      0x309   0xFFF
1074  * END
1075  * other members in mask and spec should be set to 0x00.
1076  * item->last should be NULL.
1077  */
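/*
 * Illustrative example (not part of the driver): an E-tag rule matching
 * GRP 0x1 and E-CID base 0x309 (packed together into the 14 low bits of
 * rsvd_grp_ecid_b) and forwarded to VF 0; the VF id is an assumption for
 * the example.
 *
 *   struct rte_flow_item_e_tag e_tag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *   };
 *   struct rte_flow_item_e_tag e_tag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *   };
 *   struct rte_flow_action_vf vf = { .id = 0 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &e_tag_spec, .mask = &e_tag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */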
1078 static int
1079 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1080                         const struct rte_flow_attr *attr,
1081                         const struct rte_flow_item pattern[],
1082                         const struct rte_flow_action actions[],
1083                         struct rte_eth_l2_tunnel_conf *filter,
1084                         struct rte_flow_error *error)
1085 {
1086         const struct rte_flow_item *item;
1087         const struct rte_flow_item_e_tag *e_tag_spec;
1088         const struct rte_flow_item_e_tag *e_tag_mask;
1089         const struct rte_flow_action *act;
1090         const struct rte_flow_action_vf *act_vf;
1091         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1092
1093         if (!pattern) {
1094                 rte_flow_error_set(error, EINVAL,
1095                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1096                         NULL, "NULL pattern.");
1097                 return -rte_errno;
1098         }
1099
1100         if (!actions) {
1101                 rte_flow_error_set(error, EINVAL,
1102                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1103                                    NULL, "NULL action.");
1104                 return -rte_errno;
1105         }
1106
1107         if (!attr) {
1108                 rte_flow_error_set(error, EINVAL,
1109                                    RTE_FLOW_ERROR_TYPE_ATTR,
1110                                    NULL, "NULL attribute.");
1111                 return -rte_errno;
1112         }
1113
1114         /* The first not void item should be e-tag. */
1115         item = next_no_void_pattern(pattern, NULL);
1116         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1117                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1118                 rte_flow_error_set(error, EINVAL,
1119                         RTE_FLOW_ERROR_TYPE_ITEM,
1120                         item, "Not supported by L2 tunnel filter");
1121                 return -rte_errno;
1122         }
1123
1124         if (!item->spec || !item->mask) {
1125                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1126                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1127                         item, "Not supported by L2 tunnel filter");
1128                 return -rte_errno;
1129         }
1130
1131         /*Not supported last point for range*/
1132         if (item->last) {
1133                 rte_flow_error_set(error, EINVAL,
1134                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1135                         item, "Not supported last point for range");
1136                 return -rte_errno;
1137         }
1138
1139         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1140         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1141
1142         /* Only care about GRP and E cid base. */
1143         if (e_tag_mask->epcp_edei_in_ecid_b ||
1144             e_tag_mask->in_ecid_e ||
1145             e_tag_mask->ecid_e ||
1146             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1147                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1148                 rte_flow_error_set(error, EINVAL,
1149                         RTE_FLOW_ERROR_TYPE_ITEM,
1150                         item, "Not supported by L2 tunnel filter");
1151                 return -rte_errno;
1152         }
1153
1154         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1155         /**
1156          * grp and e_cid_base are bit fields and only use 14 bits.
1157          * e-tag id is taken as little endian by HW.
1158          */
1159         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1160
1161         /* check if the next not void item is END */
1162         item = next_no_void_pattern(pattern, item);
1163         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1164                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1165                 rte_flow_error_set(error, EINVAL,
1166                         RTE_FLOW_ERROR_TYPE_ITEM,
1167                         item, "Not supported by L2 tunnel filter");
1168                 return -rte_errno;
1169         }
1170
1171         /* parse attr */
1172         /* must be input direction */
1173         if (!attr->ingress) {
1174                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1175                 rte_flow_error_set(error, EINVAL,
1176                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1177                         attr, "Only support ingress.");
1178                 return -rte_errno;
1179         }
1180
1181         /* not supported */
1182         if (attr->egress) {
1183                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1184                 rte_flow_error_set(error, EINVAL,
1185                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1186                         attr, "Not support egress.");
1187                 return -rte_errno;
1188         }
1189
1190         /* not supported */
1191         if (attr->priority) {
1192                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1193                 rte_flow_error_set(error, EINVAL,
1194                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1195                         attr, "Not support priority.");
1196                 return -rte_errno;
1197         }
1198
1199         /* check if the first not void action is VF or PF. */
1200         act = next_no_void_action(actions, NULL);
1201         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1202                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1203                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1204                 rte_flow_error_set(error, EINVAL,
1205                         RTE_FLOW_ERROR_TYPE_ACTION,
1206                         act, "Not supported action.");
1207                 return -rte_errno;
1208         }
1209
1210         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1211                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1212                 filter->pool = act_vf->id;
1213         } else {
1214                 filter->pool = pci_dev->max_vfs;
1215         }
1216
1217         /* check if the next not void action is END */
1218         act = next_no_void_action(actions, act);
1219         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1220                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1221                 rte_flow_error_set(error, EINVAL,
1222                         RTE_FLOW_ERROR_TYPE_ACTION,
1223                         act, "Not supported action.");
1224                 return -rte_errno;
1225         }
1226
1227         return 0;
1228 }
1229
1230 static int
1231 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1232                         const struct rte_flow_attr *attr,
1233                         const struct rte_flow_item pattern[],
1234                         const struct rte_flow_action actions[],
1235                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1236                         struct rte_flow_error *error)
1237 {
1238         int ret = 0;
1239         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1240         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1241         uint16_t vf_num;
1242
1243         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1244                                 actions, l2_tn_filter, error);
1245
1246         if (hw->mac.type != ixgbe_mac_X550 &&
1247                 hw->mac.type != ixgbe_mac_X550EM_x &&
1248                 hw->mac.type != ixgbe_mac_X550EM_a) {
1249                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1250                 rte_flow_error_set(error, EINVAL,
1251                         RTE_FLOW_ERROR_TYPE_ITEM,
1252                         NULL, "Not supported by L2 tunnel filter");
1253                 return -rte_errno;
1254         }
1255
1256         vf_num = pci_dev->max_vfs;
1257
1258         if (l2_tn_filter->pool > vf_num)
1259                 return -rte_errno;
1260
1261         return ret;
1262 }
1263
1264 /* Parse to get the attr and action info of flow director rule. */
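/*
 * Illustrative example (not part of the driver): a flow director action
 * list that queues matched packets to Rx queue 2 and marks them with id
 * 0x1234; both values are assumptions for the example.
 *
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_action_mark mark = { .id = 0x1234 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */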
1265 static int
1266 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1267                           const struct rte_flow_action actions[],
1268                           struct ixgbe_fdir_rule *rule,
1269                           struct rte_flow_error *error)
1270 {
1271         const struct rte_flow_action *act;
1272         const struct rte_flow_action_queue *act_q;
1273         const struct rte_flow_action_mark *mark;
1274
1275         /* parse attr */
1276         /* must be input direction */
1277         if (!attr->ingress) {
1278                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1279                 rte_flow_error_set(error, EINVAL,
1280                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1281                         attr, "Only support ingress.");
1282                 return -rte_errno;
1283         }
1284
1285         /* not supported */
1286         if (attr->egress) {
1287                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1288                 rte_flow_error_set(error, EINVAL,
1289                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1290                         attr, "Not support egress.");
1291                 return -rte_errno;
1292         }
1293
1294         /* not supported */
1295         if (attr->priority) {
1296                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1297                 rte_flow_error_set(error, EINVAL,
1298                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1299                         attr, "Not support priority.");
1300                 return -rte_errno;
1301         }
1302
1303         /* check if the first not void action is QUEUE or DROP. */
1304         act = next_no_void_action(actions, NULL);
1305         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1306             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1307                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1308                 rte_flow_error_set(error, EINVAL,
1309                         RTE_FLOW_ERROR_TYPE_ACTION,
1310                         act, "Not supported action.");
1311                 return -rte_errno;
1312         }
1313
1314         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1315                 act_q = (const struct rte_flow_action_queue *)act->conf;
1316                 rule->queue = act_q->index;
1317         } else { /* drop */
1318                 /* signature mode does not support drop action. */
1319                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1320                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1321                         rte_flow_error_set(error, EINVAL,
1322                                 RTE_FLOW_ERROR_TYPE_ACTION,
1323                                 act, "Not supported action.");
1324                         return -rte_errno;
1325                 }
1326                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1327         }
1328
1329         /* check if the next not void action is MARK or END */
1330         act = next_no_void_action(actions, act);
1331         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1332                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1333                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1334                 rte_flow_error_set(error, EINVAL,
1335                         RTE_FLOW_ERROR_TYPE_ACTION,
1336                         act, "Not supported action.");
1337                 return -rte_errno;
1338         }
1339
1340         rule->soft_id = 0;
1341
1342         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1343                 mark = (const struct rte_flow_action_mark *)act->conf;
1344                 rule->soft_id = mark->id;
1345                 act = next_no_void_action(actions, act);
1346         }
1347
1348         /* check if the next not void action is END */
1349         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1350                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1351                 rte_flow_error_set(error, EINVAL,
1352                         RTE_FLOW_ERROR_TYPE_ACTION,
1353                         act, "Not supported action.");
1354                 return -rte_errno;
1355         }
1356
1357         return 0;
1358 }
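
/*
 * For illustration only (not part of the driver): the action list accepted by
 * ixgbe_parse_fdir_act_attr() above is a QUEUE or DROP action, optionally
 * followed by MARK, and terminated by END. An application-side sketch, with
 * the queue index and mark id being arbitrary example values:
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */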
1359
1360 /* Search the next not void pattern item, skipping FUZZY items. */
1361 static inline
1362 const struct rte_flow_item *next_no_fuzzy_pattern(
1363                 const struct rte_flow_item pattern[],
1364                 const struct rte_flow_item *cur)
1365 {
1366         const struct rte_flow_item *next =
1367                 next_no_void_pattern(pattern, cur);
1368         while (1) {
1369                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1370                         return next;
1371                 next = next_no_void_pattern(pattern, next);
1372         }
1373 }
1374
1375 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1376 {
1377         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1378         const struct rte_flow_item *item;
1379         uint32_t sh, lh, mh;
1380         int i = 0;
1381
1382         while (1) {
1383                 item = pattern + i;
1384                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1385                         break;
1386
1387                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1388                         spec =
1389                         (const struct rte_flow_item_fuzzy *)item->spec;
1390                         last =
1391                         (const struct rte_flow_item_fuzzy *)item->last;
1392                         mask =
1393                         (const struct rte_flow_item_fuzzy *)item->mask;
1394
1395                         if (!spec || !mask)
1396                                 return 0;
1397
1398                         sh = spec->thresh;
1399
1400                         if (!last)
1401                                 lh = sh;
1402                         else
1403                                 lh = last->thresh;
1404
1405                         mh = mask->thresh;
1406                         sh = sh & mh;
1407                         lh = lh & mh;
1408
1409                         if (!sh || sh > lh)
1410                                 return 0;
1411
1412                         return 1;
1413                 }
1414
1415                 i++;
1416         }
1417
1418         return 0;
1419 }
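
/*
 * For illustration only (not part of the driver): signature_match() above
 * returns 1 when the pattern contains a FUZZY item whose masked threshold is
 * non-zero. An application-side sketch of such an item, with the threshold
 * value chosen arbitrarily for the example:
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */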
1420
1421 /**
1422  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1423  * and collect the flow director filter info along the way.
1424  * UDP/TCP/SCTP PATTERN:
1425  * The first not void item can be ETH or IPV4 or IPV6
1426  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1427  * The next not void item could be UDP or TCP or SCTP (optional)
1428  * The next not void item could be RAW (for flexbyte, optional)
1429  * The next not void item must be END.
1430  * A Fuzzy Match pattern can appear at any place before END.
1431  * Fuzzy Match is optional for IPV4 but is required for IPV6.
1432  * MAC VLAN PATTERN:
1433  * The first not void item must be ETH.
1434  * The second not void item must be MAC VLAN.
1435  * The next not void item must be END.
1436  * ACTION:
1437  * The first not void action should be QUEUE or DROP.
1438  * The second not void optional action should be MARK,
1439  * mark_id is a uint32_t number.
1440  * The next not void action should be END.
1441  * UDP/TCP/SCTP pattern example:
1442  * ITEM         Spec                    Mask
1443  * ETH          NULL                    NULL
1444  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1445  *              dst_addr 192.167.3.50   0xFFFFFFFF
1446  * UDP/TCP/SCTP src_port        80      0xFFFF
1447  *              dst_port        80      0xFFFF
1448  * FLEX relative        0       0x1
1449  *              search          0       0x1
1450  *              reserved        0       0
1451  *              offset          12      0xFFFFFFFF
1452  *              limit           0       0xFFFF
1453  *              length          2       0xFFFF
1454  *              pattern[0]      0x86    0xFF
1455  *              pattern[1]      0xDD    0xFF
1456  * END
1457  * MAC VLAN pattern example:
1458  * ITEM         Spec                    Mask
1459  * ETH          dst_addr
1460  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1461  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1462  * MAC VLAN     tci     0x2016          0xEFFF
1463  * END
1464  * Other members in mask and spec should be set to 0x00.
1465  * Item->last should be NULL.
1466  */
1467 static int
1468 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1469                                const struct rte_flow_attr *attr,
1470                                const struct rte_flow_item pattern[],
1471                                const struct rte_flow_action actions[],
1472                                struct ixgbe_fdir_rule *rule,
1473                                struct rte_flow_error *error)
1474 {
1475         const struct rte_flow_item *item;
1476         const struct rte_flow_item_eth *eth_spec;
1477         const struct rte_flow_item_eth *eth_mask;
1478         const struct rte_flow_item_ipv4 *ipv4_spec;
1479         const struct rte_flow_item_ipv4 *ipv4_mask;
1480         const struct rte_flow_item_ipv6 *ipv6_spec;
1481         const struct rte_flow_item_ipv6 *ipv6_mask;
1482         const struct rte_flow_item_tcp *tcp_spec;
1483         const struct rte_flow_item_tcp *tcp_mask;
1484         const struct rte_flow_item_udp *udp_spec;
1485         const struct rte_flow_item_udp *udp_mask;
1486         const struct rte_flow_item_sctp *sctp_spec;
1487         const struct rte_flow_item_sctp *sctp_mask;
1488         const struct rte_flow_item_vlan *vlan_spec;
1489         const struct rte_flow_item_vlan *vlan_mask;
1490         const struct rte_flow_item_raw *raw_mask;
1491         const struct rte_flow_item_raw *raw_spec;
1492         uint8_t j;
1493
1494         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1495
1496         if (!pattern) {
1497                 rte_flow_error_set(error, EINVAL,
1498                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1499                         NULL, "NULL pattern.");
1500                 return -rte_errno;
1501         }
1502
1503         if (!actions) {
1504                 rte_flow_error_set(error, EINVAL,
1505                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1506                                    NULL, "NULL action.");
1507                 return -rte_errno;
1508         }
1509
1510         if (!attr) {
1511                 rte_flow_error_set(error, EINVAL,
1512                                    RTE_FLOW_ERROR_TYPE_ATTR,
1513                                    NULL, "NULL attribute.");
1514                 return -rte_errno;
1515         }
1516
1517         /**
1518          * Some fields may not be provided. Set spec to 0 and mask to default
1519          * value, so we need not do anything later for the fields not provided.
1520          */
1521         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1522         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1523         rule->mask.vlan_tci_mask = 0;
1524         rule->mask.flex_bytes_mask = 0;
1525
1526         /**
1527          * The first not void item should be
1528          * ETH or IPv4 or IPv6 or TCP or UDP or SCTP.
1529          */
1530         item = next_no_fuzzy_pattern(pattern, NULL);
1531         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1532             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1533             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1534             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1535             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1536             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1537                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1538                 rte_flow_error_set(error, EINVAL,
1539                         RTE_FLOW_ERROR_TYPE_ITEM,
1540                         item, "Not supported by fdir filter");
1541                 return -rte_errno;
1542         }
1543
1544         if (signature_match(pattern))
1545                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1546         else
1547                 rule->mode = RTE_FDIR_MODE_PERFECT;
1548
1549         /*Not supported last point for range*/
1550         if (item->last) {
1551                 rte_flow_error_set(error, EINVAL,
1552                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1553                         item, "Not supported last point for range");
1554                 return -rte_errno;
1555         }
1556
1557         /* Get the MAC info. */
1558         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1559                 /**
1560                  * Only VLAN and the dst MAC address are supported;
1561                  * others should be masked.
1562                  */
1563                 if (item->spec && !item->mask) {
1564                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1565                         rte_flow_error_set(error, EINVAL,
1566                                 RTE_FLOW_ERROR_TYPE_ITEM,
1567                                 item, "Not supported by fdir filter");
1568                         return -rte_errno;
1569                 }
1570
1571                 if (item->spec) {
1572                         rule->b_spec = TRUE;
1573                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1574
1575                         /* Get the dst MAC. */
1576                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1577                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1578                                         eth_spec->dst.addr_bytes[j];
1579                         }
1580                 }
1581
1582
1583                 if (item->mask) {
1584
1585                         rule->b_mask = TRUE;
1586                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1587
1588                         /* Ether type should be masked. */
1589                         if (eth_mask->type ||
1590                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1591                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1592                                 rte_flow_error_set(error, EINVAL,
1593                                         RTE_FLOW_ERROR_TYPE_ITEM,
1594                                         item, "Not supported by fdir filter");
1595                                 return -rte_errno;
1596                         }
1597
1598                         /* If the Ethernet mask is meaningful, it means MAC VLAN mode. */
1599                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1600
1601                         /**
1602                          * The src MAC address mask must be all zero (src MAC ignored),
1603                          * and the dst MAC address mask must be all 0xFF.
1604                          */
1605                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1606                                 if (eth_mask->src.addr_bytes[j] ||
1607                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1608                                         memset(rule, 0,
1609                                         sizeof(struct ixgbe_fdir_rule));
1610                                         rte_flow_error_set(error, EINVAL,
1611                                         RTE_FLOW_ERROR_TYPE_ITEM,
1612                                         item, "Not supported by fdir filter");
1613                                         return -rte_errno;
1614                                 }
1615                         }
1616
1617                         /* When there is no VLAN, it is treated as a full mask. */
1618                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1619                 }
1620                 /** If both spec and mask are NULL,
1621                  * it means we don't care about ETH.
1622                  * Do nothing.
1623                  */
1624
1625                 /**
1626                  * Check if the next not void item is vlan or ipv4.
1627                  * IPv6 is not supported.
1628                  */
1629                 item = next_no_fuzzy_pattern(pattern, item);
1630                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1631                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1632                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1633                                 rte_flow_error_set(error, EINVAL,
1634                                         RTE_FLOW_ERROR_TYPE_ITEM,
1635                                         item, "Not supported by fdir filter");
1636                                 return -rte_errno;
1637                         }
1638                 } else {
1639                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1640                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1641                                 rte_flow_error_set(error, EINVAL,
1642                                         RTE_FLOW_ERROR_TYPE_ITEM,
1643                                         item, "Not supported by fdir filter");
1644                                 return -rte_errno;
1645                         }
1646                 }
1647         }
1648
1649         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1650                 if (!(item->spec && item->mask)) {
1651                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1652                         rte_flow_error_set(error, EINVAL,
1653                                 RTE_FLOW_ERROR_TYPE_ITEM,
1654                                 item, "Not supported by fdir filter");
1655                         return -rte_errno;
1656                 }
1657
1658                 /*Not supported last point for range*/
1659                 if (item->last) {
1660                         rte_flow_error_set(error, EINVAL,
1661                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1662                                 item, "Not supported last point for range");
1663                         return -rte_errno;
1664                 }
1665
1666                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1667                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1668
1669                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1670
1671                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1672                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1673                 /* More than one tag is not supported. */
1674
1675                 /* Next not void item must be END */
1676                 item = next_no_fuzzy_pattern(pattern, item);
1677                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1678                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1679                         rte_flow_error_set(error, EINVAL,
1680                                 RTE_FLOW_ERROR_TYPE_ITEM,
1681                                 item, "Not supported by fdir filter");
1682                         return -rte_errno;
1683                 }
1684         }
1685
1686         /* Get the IPV4 info. */
1687         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1688                 /**
1689                  * Set the flow type even if there's no content
1690                  * as we must have a flow type.
1691                  */
1692                 rule->ixgbe_fdir.formatted.flow_type =
1693                         IXGBE_ATR_FLOW_TYPE_IPV4;
1694                 /*Not supported last point for range*/
1695                 if (item->last) {
1696                         rte_flow_error_set(error, EINVAL,
1697                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1698                                 item, "Not supported last point for range");
1699                         return -rte_errno;
1700                 }
1701                 /**
1702                  * Only care about src & dst addresses,
1703                  * others should be masked.
1704                  */
1705                 if (!item->mask) {
1706                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1707                         rte_flow_error_set(error, EINVAL,
1708                                 RTE_FLOW_ERROR_TYPE_ITEM,
1709                                 item, "Not supported by fdir filter");
1710                         return -rte_errno;
1711                 }
1712                 rule->b_mask = TRUE;
1713                 ipv4_mask =
1714                         (const struct rte_flow_item_ipv4 *)item->mask;
1715                 if (ipv4_mask->hdr.version_ihl ||
1716                     ipv4_mask->hdr.type_of_service ||
1717                     ipv4_mask->hdr.total_length ||
1718                     ipv4_mask->hdr.packet_id ||
1719                     ipv4_mask->hdr.fragment_offset ||
1720                     ipv4_mask->hdr.time_to_live ||
1721                     ipv4_mask->hdr.next_proto_id ||
1722                     ipv4_mask->hdr.hdr_checksum) {
1723                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1724                         rte_flow_error_set(error, EINVAL,
1725                                 RTE_FLOW_ERROR_TYPE_ITEM,
1726                                 item, "Not supported by fdir filter");
1727                         return -rte_errno;
1728                 }
1729                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1730                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1731
1732                 if (item->spec) {
1733                         rule->b_spec = TRUE;
1734                         ipv4_spec =
1735                                 (const struct rte_flow_item_ipv4 *)item->spec;
1736                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1737                                 ipv4_spec->hdr.dst_addr;
1738                         rule->ixgbe_fdir.formatted.src_ip[0] =
1739                                 ipv4_spec->hdr.src_addr;
1740                 }
1741
1742                 /**
1743                  * Check if the next not void item is
1744                  * TCP or UDP or SCTP or RAW or END.
1745                  */
1746                 item = next_no_fuzzy_pattern(pattern, item);
1747                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1748                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1749                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1750                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1751                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1752                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1753                         rte_flow_error_set(error, EINVAL,
1754                                 RTE_FLOW_ERROR_TYPE_ITEM,
1755                                 item, "Not supported by fdir filter");
1756                         return -rte_errno;
1757                 }
1758         }
1759
1760         /* Get the IPV6 info. */
1761         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1762                 /**
1763                  * Set the flow type even if there's no content
1764                  * as we must have a flow type.
1765                  */
1766                 rule->ixgbe_fdir.formatted.flow_type =
1767                         IXGBE_ATR_FLOW_TYPE_IPV6;
1768
1769                 /**
1770                  * 1. It must be a signature match.
1771                  * 2. 'last' is not supported.
1772                  * 3. The mask must not be NULL.
1773                  */
1774                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1775                     item->last ||
1776                     !item->mask) {
1777                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1778                         rte_flow_error_set(error, EINVAL,
1779                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1780                                 item, "Not supported last point for range");
1781                         return -rte_errno;
1782                 }
1783
1784                 rule->b_mask = TRUE;
1785                 ipv6_mask =
1786                         (const struct rte_flow_item_ipv6 *)item->mask;
1787                 if (ipv6_mask->hdr.vtc_flow ||
1788                     ipv6_mask->hdr.payload_len ||
1789                     ipv6_mask->hdr.proto ||
1790                     ipv6_mask->hdr.hop_limits) {
1791                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1792                         rte_flow_error_set(error, EINVAL,
1793                                 RTE_FLOW_ERROR_TYPE_ITEM,
1794                                 item, "Not supported by fdir filter");
1795                         return -rte_errno;
1796                 }
1797
1798                 /* check src addr mask */
1799                 for (j = 0; j < 16; j++) {
1800                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1801                                 rule->mask.src_ipv6_mask |= 1 << j;
1802                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1803                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1804                                 rte_flow_error_set(error, EINVAL,
1805                                         RTE_FLOW_ERROR_TYPE_ITEM,
1806                                         item, "Not supported by fdir filter");
1807                                 return -rte_errno;
1808                         }
1809                 }
1810
1811                 /* check dst addr mask */
1812                 for (j = 0; j < 16; j++) {
1813                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1814                                 rule->mask.dst_ipv6_mask |= 1 << j;
1815                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1816                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1817                                 rte_flow_error_set(error, EINVAL,
1818                                         RTE_FLOW_ERROR_TYPE_ITEM,
1819                                         item, "Not supported by fdir filter");
1820                                 return -rte_errno;
1821                         }
1822                 }
1823
1824                 if (item->spec) {
1825                         rule->b_spec = TRUE;
1826                         ipv6_spec =
1827                                 (const struct rte_flow_item_ipv6 *)item->spec;
1828                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1829                                    ipv6_spec->hdr.src_addr, 16);
1830                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1831                                    ipv6_spec->hdr.dst_addr, 16);
1832                 }
1833
1834                 /**
1835                  * Check if the next not void item is
1836                  * TCP or UDP or SCTP or RAW or END.
1837                  */
1838                 item = next_no_fuzzy_pattern(pattern, item);
1839                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1840                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1841                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1842                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1843                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1844                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1845                         rte_flow_error_set(error, EINVAL,
1846                                 RTE_FLOW_ERROR_TYPE_ITEM,
1847                                 item, "Not supported by fdir filter");
1848                         return -rte_errno;
1849                 }
1850         }
1851
1852         /* Get the TCP info. */
1853         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1854                 /**
1855                  * Set the flow type even if there's no content
1856                  * as we must have a flow type.
1857                  */
1858                 rule->ixgbe_fdir.formatted.flow_type |=
1859                         IXGBE_ATR_L4TYPE_TCP;
1860                 /*Not supported last point for range*/
1861                 if (item->last) {
1862                         rte_flow_error_set(error, EINVAL,
1863                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1864                                 item, "Not supported last point for range");
1865                         return -rte_errno;
1866                 }
1867                 /**
1868                  * Only care about src & dst ports,
1869                  * others should be masked.
1870                  */
1871                 if (!item->mask) {
1872                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1873                         rte_flow_error_set(error, EINVAL,
1874                                 RTE_FLOW_ERROR_TYPE_ITEM,
1875                                 item, "Not supported by fdir filter");
1876                         return -rte_errno;
1877                 }
1878                 rule->b_mask = TRUE;
1879                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1880                 if (tcp_mask->hdr.sent_seq ||
1881                     tcp_mask->hdr.recv_ack ||
1882                     tcp_mask->hdr.data_off ||
1883                     tcp_mask->hdr.tcp_flags ||
1884                     tcp_mask->hdr.rx_win ||
1885                     tcp_mask->hdr.cksum ||
1886                     tcp_mask->hdr.tcp_urp) {
1887                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1888                         rte_flow_error_set(error, EINVAL,
1889                                 RTE_FLOW_ERROR_TYPE_ITEM,
1890                                 item, "Not supported by fdir filter");
1891                         return -rte_errno;
1892                 }
1893                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1894                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1895
1896                 if (item->spec) {
1897                         rule->b_spec = TRUE;
1898                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1899                         rule->ixgbe_fdir.formatted.src_port =
1900                                 tcp_spec->hdr.src_port;
1901                         rule->ixgbe_fdir.formatted.dst_port =
1902                                 tcp_spec->hdr.dst_port;
1903                 }
1904
1905                 item = next_no_fuzzy_pattern(pattern, item);
1906                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1907                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1908                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1909                         rte_flow_error_set(error, EINVAL,
1910                                 RTE_FLOW_ERROR_TYPE_ITEM,
1911                                 item, "Not supported by fdir filter");
1912                         return -rte_errno;
1913                 }
1914
1915         }
1916
1917         /* Get the UDP info */
1918         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1919                 /**
1920                  * Set the flow type even if there's no content
1921                  * as we must have a flow type.
1922                  */
1923                 rule->ixgbe_fdir.formatted.flow_type |=
1924                         IXGBE_ATR_L4TYPE_UDP;
1925                 /*Not supported last point for range*/
1926                 if (item->last) {
1927                         rte_flow_error_set(error, EINVAL,
1928                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1929                                 item, "Not supported last point for range");
1930                         return -rte_errno;
1931                 }
1932                 /**
1933                  * Only care about src & dst ports,
1934                  * others should be masked.
1935                  */
1936                 if (!item->mask) {
1937                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1938                         rte_flow_error_set(error, EINVAL,
1939                                 RTE_FLOW_ERROR_TYPE_ITEM,
1940                                 item, "Not supported by fdir filter");
1941                         return -rte_errno;
1942                 }
1943                 rule->b_mask = TRUE;
1944                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1945                 if (udp_mask->hdr.dgram_len ||
1946                     udp_mask->hdr.dgram_cksum) {
1947                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1948                         rte_flow_error_set(error, EINVAL,
1949                                 RTE_FLOW_ERROR_TYPE_ITEM,
1950                                 item, "Not supported by fdir filter");
1951                         return -rte_errno;
1952                 }
1953                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1954                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1955
1956                 if (item->spec) {
1957                         rule->b_spec = TRUE;
1958                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1959                         rule->ixgbe_fdir.formatted.src_port =
1960                                 udp_spec->hdr.src_port;
1961                         rule->ixgbe_fdir.formatted.dst_port =
1962                                 udp_spec->hdr.dst_port;
1963                 }
1964
1965                 item = next_no_fuzzy_pattern(pattern, item);
1966                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1967                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1968                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1969                         rte_flow_error_set(error, EINVAL,
1970                                 RTE_FLOW_ERROR_TYPE_ITEM,
1971                                 item, "Not supported by fdir filter");
1972                         return -rte_errno;
1973                 }
1974
1975         }
1976
1977         /* Get the SCTP info */
1978         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1979                 /**
1980                  * Set the flow type even if there's no content
1981                  * as we must have a flow type.
1982                  */
1983                 rule->ixgbe_fdir.formatted.flow_type |=
1984                         IXGBE_ATR_L4TYPE_SCTP;
1985                 /*Not supported last point for range*/
1986                 if (item->last) {
1987                         rte_flow_error_set(error, EINVAL,
1988                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1989                                 item, "Not supported last point for range");
1990                         return -rte_errno;
1991                 }
1992
1993                 /* Only the x550 family supports the SCTP port. */
1994                 if (hw->mac.type == ixgbe_mac_X550 ||
1995                     hw->mac.type == ixgbe_mac_X550EM_x ||
1996                     hw->mac.type == ixgbe_mac_X550EM_a) {
1997                         /**
1998                          * Only care about src & dst ports,
1999                          * others should be masked.
2000                          */
2001                         if (!item->mask) {
2002                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2003                                 rte_flow_error_set(error, EINVAL,
2004                                         RTE_FLOW_ERROR_TYPE_ITEM,
2005                                         item, "Not supported by fdir filter");
2006                                 return -rte_errno;
2007                         }
2008                         rule->b_mask = TRUE;
2009                         sctp_mask =
2010                                 (const struct rte_flow_item_sctp *)item->mask;
2011                         if (sctp_mask->hdr.tag ||
2012                                 sctp_mask->hdr.cksum) {
2013                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2014                                 rte_flow_error_set(error, EINVAL,
2015                                         RTE_FLOW_ERROR_TYPE_ITEM,
2016                                         item, "Not supported by fdir filter");
2017                                 return -rte_errno;
2018                         }
2019                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2020                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2021
2022                         if (item->spec) {
2023                                 rule->b_spec = TRUE;
2024                                 sctp_spec =
2025                                 (const struct rte_flow_item_sctp *)item->spec;
2026                                 rule->ixgbe_fdir.formatted.src_port =
2027                                         sctp_spec->hdr.src_port;
2028                                 rule->ixgbe_fdir.formatted.dst_port =
2029                                         sctp_spec->hdr.dst_port;
2030                         }
2031                 /* For other MAC types, even the SCTP port is not supported. */
2032                 } else {
2033                         sctp_mask =
2034                                 (const struct rte_flow_item_sctp *)item->mask;
2035                         if (sctp_mask &&
2036                                 (sctp_mask->hdr.src_port ||
2037                                  sctp_mask->hdr.dst_port ||
2038                                  sctp_mask->hdr.tag ||
2039                                  sctp_mask->hdr.cksum)) {
2040                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2041                                 rte_flow_error_set(error, EINVAL,
2042                                         RTE_FLOW_ERROR_TYPE_ITEM,
2043                                         item, "Not supported by fdir filter");
2044                                 return -rte_errno;
2045                         }
2046                 }
2047
2048                 item = next_no_fuzzy_pattern(pattern, item);
2049                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2050                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2051                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2052                         rte_flow_error_set(error, EINVAL,
2053                                 RTE_FLOW_ERROR_TYPE_ITEM,
2054                                 item, "Not supported by fdir filter");
2055                         return -rte_errno;
2056                 }
2057         }
2058
2059         /* Get the flex byte info */
2060         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2061                 /* Not supported last point for range*/
2062                 if (item->last) {
2063                         rte_flow_error_set(error, EINVAL,
2064                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2065                                 item, "Not supported last point for range");
2066                         return -rte_errno;
2067                 }
2068                 /* Neither mask nor spec should be NULL. */
2069                 if (!item->mask || !item->spec) {
2070                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2071                         rte_flow_error_set(error, EINVAL,
2072                                 RTE_FLOW_ERROR_TYPE_ITEM,
2073                                 item, "Not supported by fdir filter");
2074                         return -rte_errno;
2075                 }
2076
2077                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2078
2079                 /* check mask */
2080                 if (raw_mask->relative != 0x1 ||
2081                     raw_mask->search != 0x1 ||
2082                     raw_mask->reserved != 0x0 ||
2083                     (uint32_t)raw_mask->offset != 0xffffffff ||
2084                     raw_mask->limit != 0xffff ||
2085                     raw_mask->length != 0xffff) {
2086                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2087                         rte_flow_error_set(error, EINVAL,
2088                                 RTE_FLOW_ERROR_TYPE_ITEM,
2089                                 item, "Not supported by fdir filter");
2090                         return -rte_errno;
2091                 }
2092
2093                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2094
2095                 /* check spec */
2096                 if (raw_spec->relative != 0 ||
2097                     raw_spec->search != 0 ||
2098                     raw_spec->reserved != 0 ||
2099                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2100                     raw_spec->offset % 2 ||
2101                     raw_spec->limit != 0 ||
2102                     raw_spec->length != 2 ||
2103                     /* pattern can't be 0xffff */
2104                     (raw_spec->pattern[0] == 0xff &&
2105                      raw_spec->pattern[1] == 0xff)) {
2106                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2107                         rte_flow_error_set(error, EINVAL,
2108                                 RTE_FLOW_ERROR_TYPE_ITEM,
2109                                 item, "Not supported by fdir filter");
2110                         return -rte_errno;
2111                 }
2112
2113                 /* check pattern mask */
2114                 if (raw_mask->pattern[0] != 0xff ||
2115                     raw_mask->pattern[1] != 0xff) {
2116                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2117                         rte_flow_error_set(error, EINVAL,
2118                                 RTE_FLOW_ERROR_TYPE_ITEM,
2119                                 item, "Not supported by fdir filter");
2120                         return -rte_errno;
2121                 }
2122
2123                 rule->mask.flex_bytes_mask = 0xffff;
2124                 rule->ixgbe_fdir.formatted.flex_bytes =
2125                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2126                         raw_spec->pattern[0];
2127                 rule->flex_bytes_offset = raw_spec->offset;
2128         }
2129
2130         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2131                 /* check if the next not void item is END */
2132                 item = next_no_fuzzy_pattern(pattern, item);
2133                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2134                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2135                         rte_flow_error_set(error, EINVAL,
2136                                 RTE_FLOW_ERROR_TYPE_ITEM,
2137                                 item, "Not supported by fdir filter");
2138                         return -rte_errno;
2139                 }
2140         }
2141
2142         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2143 }
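
/*
 * For illustration only (not part of the driver): an application-side sketch
 * of the perfect-mode UDP pattern documented above
 * ixgbe_parse_fdir_filter_normal(). The addresses and ports mirror the
 * example table (192.168.1.20 -> 192.167.3.50, port 80) and are arbitrary
 * values; fields whose mask stays zero are "don't care".
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = rte_cpu_to_be_16(0xFFFF),
 *		.hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */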
2144
2145 #define NVGRE_PROTOCOL 0x6558
2146
2147 /**
2148  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2149  * and collect the flow director filter info along the way.
2150  * VxLAN PATTERN:
2151  * The first not void item must be ETH.
2152  * The second not void item must be IPV4/ IPV6.
2153  * The third not void item must be UDP, and the fourth must be VxLAN.
2154  * The next not void item must be END.
2155  * NVGRE PATTERN:
2156  * The first not void item must be ETH.
2157  * The second not void item must be IPV4/ IPV6.
2158  * The third not void item must be NVGRE.
2159  * The next not void item must be END.
2160  * ACTION:
2161  * The first not void action should be QUEUE or DROP.
2162  * The second not void optional action should be MARK,
2163  * mark_id is a uint32_t number.
2164  * The next not void action should be END.
2165  * VxLAN pattern example:
2166  * ITEM         Spec                    Mask
2167  * ETH          NULL                    NULL
2168  * IPV4/IPV6    NULL                    NULL
2169  * UDP          NULL                    NULL
2170  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2171  * MAC VLAN     tci     0x2016          0xEFFF
2172  * END
2173  * NVGRE pattern example:
2174  * ITEM         Spec                    Mask
2175  * ETH          NULL                    NULL
2176  * IPV4/IPV6    NULL                    NULL
2177  * NVGRE        protocol        0x6558  0xFFFF
2178  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2179  * MAC VLAN     tci     0x2016          0xEFFF
2180  * END
2181  * Other members in mask and spec should be set to 0x00.
2182  * item->last should be NULL.
2183  */
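
/*
 * For illustration only (not part of the driver): in the VxLAN pattern above,
 * the outer ETH, IPV4/IPV6 and UDP items carry no spec or mask (they only
 * describe the protocol stack), while the VxLAN item must mask the VNI
 * completely. An application-side sketch of that VxLAN item, using the VNI
 * from the example table:
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item vxlan_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &vxlan_spec,
 *		.mask = &vxlan_mask,
 *	};
 */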
2184 static int
2185 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2186                                const struct rte_flow_item pattern[],
2187                                const struct rte_flow_action actions[],
2188                                struct ixgbe_fdir_rule *rule,
2189                                struct rte_flow_error *error)
2190 {
2191         const struct rte_flow_item *item;
2192         const struct rte_flow_item_vxlan *vxlan_spec;
2193         const struct rte_flow_item_vxlan *vxlan_mask;
2194         const struct rte_flow_item_nvgre *nvgre_spec;
2195         const struct rte_flow_item_nvgre *nvgre_mask;
2196         const struct rte_flow_item_eth *eth_spec;
2197         const struct rte_flow_item_eth *eth_mask;
2198         const struct rte_flow_item_vlan *vlan_spec;
2199         const struct rte_flow_item_vlan *vlan_mask;
2200         uint32_t j;
2201
2202         if (!pattern) {
2203                 rte_flow_error_set(error, EINVAL,
2204                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2205                                    NULL, "NULL pattern.");
2206                 return -rte_errno;
2207         }
2208
2209         if (!actions) {
2210                 rte_flow_error_set(error, EINVAL,
2211                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2212                                    NULL, "NULL action.");
2213                 return -rte_errno;
2214         }
2215
2216         if (!attr) {
2217                 rte_flow_error_set(error, EINVAL,
2218                                    RTE_FLOW_ERROR_TYPE_ATTR,
2219                                    NULL, "NULL attribute.");
2220                 return -rte_errno;
2221         }
2222
2223         /**
2224          * Some fields may not be provided. Set spec to 0 and mask to default
2225          * value, so we need not do anything later for the fields not provided.
2226          */
2227         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2228         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2229         rule->mask.vlan_tci_mask = 0;
2230
2231         /**
2232          * The first not void item should be
2233          * ETH or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2234          */
2235         item = next_no_void_pattern(pattern, NULL);
2236         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2237             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2238             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2239             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2240             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2241             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2242                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2243                 rte_flow_error_set(error, EINVAL,
2244                         RTE_FLOW_ERROR_TYPE_ITEM,
2245                         item, "Not supported by fdir filter");
2246                 return -rte_errno;
2247         }
2248
2249         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2250
2251         /* Skip MAC. */
2252         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2253                 /* Only used to describe the protocol stack. */
2254                 if (item->spec || item->mask) {
2255                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2256                         rte_flow_error_set(error, EINVAL,
2257                                 RTE_FLOW_ERROR_TYPE_ITEM,
2258                                 item, "Not supported by fdir filter");
2259                         return -rte_errno;
2260                 }
2261                 /* Not supported last point for range*/
2262                 if (item->last) {
2263                         rte_flow_error_set(error, EINVAL,
2264                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2265                                 item, "Not supported last point for range");
2266                         return -rte_errno;
2267                 }
2268
2269                 /* Check if the next not void item is IPv4 or IPv6. */
2270                 item = next_no_void_pattern(pattern, item);
2271                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2272                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2273                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2274                         rte_flow_error_set(error, EINVAL,
2275                                 RTE_FLOW_ERROR_TYPE_ITEM,
2276                                 item, "Not supported by fdir filter");
2277                         return -rte_errno;
2278                 }
2279         }
2280
2281         /* Skip IP. */
2282         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2283             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2284                 /* Only used to describe the protocol stack. */
2285                 if (item->spec || item->mask) {
2286                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2287                         rte_flow_error_set(error, EINVAL,
2288                                 RTE_FLOW_ERROR_TYPE_ITEM,
2289                                 item, "Not supported by fdir filter");
2290                         return -rte_errno;
2291                 }
2292                 /*Not supported last point for range*/
2293                 if (item->last) {
2294                         rte_flow_error_set(error, EINVAL,
2295                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2296                                 item, "Not supported last point for range");
2297                         return -rte_errno;
2298                 }
2299
2300                 /* Check if the next not void item is UDP or NVGRE. */
2301                 item = next_no_void_pattern(pattern, item);
2302                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2303                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2304                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2305                         rte_flow_error_set(error, EINVAL,
2306                                 RTE_FLOW_ERROR_TYPE_ITEM,
2307                                 item, "Not supported by fdir filter");
2308                         return -rte_errno;
2309                 }
2310         }
2311
2312         /* Skip UDP. */
2313         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2314                 /* Only used to describe the protocol stack. */
2315                 if (item->spec || item->mask) {
2316                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2317                         rte_flow_error_set(error, EINVAL,
2318                                 RTE_FLOW_ERROR_TYPE_ITEM,
2319                                 item, "Not supported by fdir filter");
2320                         return -rte_errno;
2321                 }
2322                 /*Not supported last point for range*/
2323                 if (item->last) {
2324                         rte_flow_error_set(error, EINVAL,
2325                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2326                                 item, "Not supported last point for range");
2327                         return -rte_errno;
2328                 }
2329
2330                 /* Check if the next not void item is VxLAN. */
2331                 item = next_no_void_pattern(pattern, item);
2332                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2333                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2334                         rte_flow_error_set(error, EINVAL,
2335                                 RTE_FLOW_ERROR_TYPE_ITEM,
2336                                 item, "Not supported by fdir filter");
2337                         return -rte_errno;
2338                 }
2339         }
2340
2341         /* Get the VxLAN info */
2342         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2343                 rule->ixgbe_fdir.formatted.tunnel_type =
2344                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2345
2346                 /* Only care about VNI, others should be masked. */
2347                 if (!item->mask) {
2348                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_ITEM,
2351                                 item, "Not supported by fdir filter");
2352                         return -rte_errno;
2353                 }
2354                 /*Not supported last point for range*/
2355                 if (item->last) {
2356                         rte_flow_error_set(error, EINVAL,
2357                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2358                                 item, "Not supported last point for range");
2359                         return -rte_errno;
2360                 }
2361                 rule->b_mask = TRUE;
2362
2363                 /* Tunnel type is always meaningful. */
2364                 rule->mask.tunnel_type_mask = 1;
2365
2366                 vxlan_mask =
2367                         (const struct rte_flow_item_vxlan *)item->mask;
2368                 if (vxlan_mask->flags) {
2369                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2370                         rte_flow_error_set(error, EINVAL,
2371                                 RTE_FLOW_ERROR_TYPE_ITEM,
2372                                 item, "Not supported by fdir filter");
2373                         return -rte_errno;
2374                 }
2375                 /* The VNI must be either fully masked or not masked at all. */
2376                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2377                         vxlan_mask->vni[2]) &&
2378                         ((vxlan_mask->vni[0] != 0xFF) ||
2379                         (vxlan_mask->vni[1] != 0xFF) ||
2380                                 (vxlan_mask->vni[2] != 0xFF))) {
2381                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2382                         rte_flow_error_set(error, EINVAL,
2383                                 RTE_FLOW_ERROR_TYPE_ITEM,
2384                                 item, "Not supported by fdir filter");
2385                         return -rte_errno;
2386                 }
2387
2388                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2389                         RTE_DIM(vxlan_mask->vni));
2390
2391                 if (item->spec) {
2392                         rule->b_spec = TRUE;
2393                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2394                                         item->spec;
2395                         rte_memcpy(((uint8_t *)
2396                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2397                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2398                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2399                                 rule->ixgbe_fdir.formatted.tni_vni);
2400                 }
2401         }
2402
2403         /* Get the NVGRE info */
2404         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2405                 rule->ixgbe_fdir.formatted.tunnel_type =
2406                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2407
2408                 /**
2409                  * Only care about the C/K/S flag bits, the protocol and the TNI;
2410                  * others should be masked.
2411                  */
2412                 if (!item->mask) {
2413                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2414                         rte_flow_error_set(error, EINVAL,
2415                                 RTE_FLOW_ERROR_TYPE_ITEM,
2416                                 item, "Not supported by fdir filter");
2417                         return -rte_errno;
2418                 }
2419                 /*Not supported last point for range*/
2420                 if (item->last) {
2421                         rte_flow_error_set(error, EINVAL,
2422                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2423                                 item, "Not supported last point for range");
2424                         return -rte_errno;
2425                 }
2426                 rule->b_mask = TRUE;
2427
2428                 /* Tunnel type is always meaningful. */
2429                 rule->mask.tunnel_type_mask = 1;
2430
2431                 nvgre_mask =
2432                         (const struct rte_flow_item_nvgre *)item->mask;
2433                 if (nvgre_mask->flow_id) {
2434                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2435                         rte_flow_error_set(error, EINVAL,
2436                                 RTE_FLOW_ERROR_TYPE_ITEM,
2437                                 item, "Not supported by fdir filter");
2438                         return -rte_errno;
2439                 }
2440                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2441                         rte_cpu_to_be_16(0x3000) ||
2442                     nvgre_mask->protocol != 0xFFFF) {
2443                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2444                         rte_flow_error_set(error, EINVAL,
2445                                 RTE_FLOW_ERROR_TYPE_ITEM,
2446                                 item, "Not supported by fdir filter");
2447                         return -rte_errno;
2448                 }
2449                 /* The TNI must be either fully masked or not masked at all. */
2450                 if (nvgre_mask->tni[0] &&
2451                     ((nvgre_mask->tni[0] != 0xFF) ||
2452                     (nvgre_mask->tni[1] != 0xFF) ||
2453                     (nvgre_mask->tni[2] != 0xFF))) {
2454                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2455                         rte_flow_error_set(error, EINVAL,
2456                                 RTE_FLOW_ERROR_TYPE_ITEM,
2457                                 item, "Not supported by fdir filter");
2458                         return -rte_errno;
2459                 }
2460                 /* tni is a 24-bit field */
2461                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2462                         RTE_DIM(nvgre_mask->tni));
2463                 rule->mask.tunnel_id_mask <<= 8;
2464
2465                 if (item->spec) {
2466                         rule->b_spec = TRUE;
2467                         nvgre_spec =
2468                                 (const struct rte_flow_item_nvgre *)item->spec;
2469                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2470                             rte_cpu_to_be_16(0x2000) ||
2471                             nvgre_spec->protocol !=
2472                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2473                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2474                                 rte_flow_error_set(error, EINVAL,
2475                                         RTE_FLOW_ERROR_TYPE_ITEM,
2476                                         item, "Not supported by fdir filter");
2477                                 return -rte_errno;
2478                         }
2479                         /* The TNI is a 24-bit field. */
2480                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2481                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2482                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2483                 }
2484         }
2485
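        /*
         * Illustrative sketch (not part of the original code): a pattern tail
         * an application might supply to satisfy the checks below, assuming
         * eth_s/eth_m and vlan_s/vlan_m are filled in by the caller:
         *
         *     { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_s, .mask = &eth_m },
         *     { .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_s, .mask = &vlan_m },
         *     { .type = RTE_FLOW_ITEM_TYPE_END },
         */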
2486         /* check if the next not void item is MAC */
2487         item = next_no_void_pattern(pattern, item);
2488         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2489                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2490                 rte_flow_error_set(error, EINVAL,
2491                         RTE_FLOW_ERROR_TYPE_ITEM,
2492                         item, "Not supported by fdir filter");
2493                 return -rte_errno;
2494         }
2495
2496         /**
2497          * Only the VLAN and destination MAC address are supported;
2498          * all other fields must be masked.
2499          */
2500
2501         if (!item->mask) {
2502                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2503                 rte_flow_error_set(error, EINVAL,
2504                         RTE_FLOW_ERROR_TYPE_ITEM,
2505                         item, "Not supported by fdir filter");
2506                 return -rte_errno;
2507         }
2508         /* Range matching via "last" is not supported. */
2509         if (item->last) {
2510                 rte_flow_error_set(error, EINVAL,
2511                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2512                         item, "Not supported last point for range");
2513                 return -rte_errno;
2514         }
2515         rule->b_mask = TRUE;
2516         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2517
2518         /* Ether type should be masked. */
2519         if (eth_mask->type) {
2520                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2521                 rte_flow_error_set(error, EINVAL,
2522                         RTE_FLOW_ERROR_TYPE_ITEM,
2523                         item, "Not supported by fdir filter");
2524                 return -rte_errno;
2525         }
2526
2527         /* src MAC address should be masked. */
2528         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2529                 if (eth_mask->src.addr_bytes[j]) {
2530                         memset(rule, 0,
2531                                sizeof(struct ixgbe_fdir_rule));
2532                         rte_flow_error_set(error, EINVAL,
2533                                 RTE_FLOW_ERROR_TYPE_ITEM,
2534                                 item, "Not supported by fdir filter");
2535                         return -rte_errno;
2536                 }
2537         }
2538         rule->mask.mac_addr_byte_mask = 0;
2539         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2540                 /* It's a per-byte mask. */
2541                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2542                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2543                 } else if (eth_mask->dst.addr_bytes[j]) {
2544                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2545                         rte_flow_error_set(error, EINVAL,
2546                                 RTE_FLOW_ERROR_TYPE_ITEM,
2547                                 item, "Not supported by fdir filter");
2548                         return -rte_errno;
2549                 }
2550         }
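        /*
         * Example: a destination MAC that is fully specified (all six mask
         * bytes 0xFF) yields mac_addr_byte_mask == 0x3F.
         */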
2551
2552         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
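        /*
         * 0xEFFF covers the 802.1Q priority (PCP, 0xE000) and VLAN ID
         * (0x0FFF) bits; the DEI/CFI bit (0x1000) is left as a don't-care.
         */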
2553         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2554
2555         if (item->spec) {
2556                 rule->b_spec = TRUE;
2557                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2558
2559                 /* Get the dst MAC. */
2560                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2561                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2562                                 eth_spec->dst.addr_bytes[j];
2563                 }
2564         }
2565
2566         /**
2567          * Check if the next not void item is vlan or ipv4.
2568          * IPv6 is not supported.
2569          */
2570         item = next_no_void_pattern(pattern, item);
2571         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2572                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2573                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2574                 rte_flow_error_set(error, EINVAL,
2575                         RTE_FLOW_ERROR_TYPE_ITEM,
2576                         item, "Not supported by fdir filter");
2577                 return -rte_errno;
2578         }
2579         /* Range matching via "last" is not supported. */
2580         if (item->last) {
2581                 rte_flow_error_set(error, EINVAL,
2582                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2583                         item, "Not supported last point for range");
2584                 return -rte_errno;
2585         }
2586
2587         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2588                 if (!(item->spec && item->mask)) {
2589                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2590                         rte_flow_error_set(error, EINVAL,
2591                                 RTE_FLOW_ERROR_TYPE_ITEM,
2592                                 item, "Not supported by fdir filter");
2593                         return -rte_errno;
2594                 }
2595
2596                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2597                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2598
2599                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2600
2601                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2602                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2603                 /* More than one VLAN tag is not supported. */
2604
2605                 /* check if the next not void item is END */
2606                 item = next_no_void_pattern(pattern, item);
2607
2608                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2609                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2610                         rte_flow_error_set(error, EINVAL,
2611                                 RTE_FLOW_ERROR_TYPE_ITEM,
2612                                 item, "Not supported by fdir filter");
2613                         return -rte_errno;
2614                 }
2615         }
2616
2617         /**
2618          * If the VLAN TCI mask is 0, the VLAN is a don't-care;
2619          * nothing more needs to be done.
2620          */
2621
2622         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2623 }
2624
2625 static int
2626 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2627                         const struct rte_flow_attr *attr,
2628                         const struct rte_flow_item pattern[],
2629                         const struct rte_flow_action actions[],
2630                         struct ixgbe_fdir_rule *rule,
2631                         struct rte_flow_error *error)
2632 {
2633         int ret;
2634         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2635         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2636
2637         if (hw->mac.type != ixgbe_mac_82599EB &&
2638                 hw->mac.type != ixgbe_mac_X540 &&
2639                 hw->mac.type != ixgbe_mac_X550 &&
2640                 hw->mac.type != ixgbe_mac_X550EM_x &&
2641                 hw->mac.type != ixgbe_mac_X550EM_a)
2642                 return -ENOTSUP;
2643
2644         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2645                                         actions, rule, error);
2646
2647         if (!ret)
2648                 goto step_next;
2649
2650         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2651                                         actions, rule, error);
2652
2653         if (ret)
2654                 return ret;
2655
2656 step_next:
2657
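        /* The 82599 does not support a drop rule that also matches L4 ports. */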
2658         if (hw->mac.type == ixgbe_mac_82599EB &&
2659                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2660                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2661                 rule->ixgbe_fdir.formatted.dst_port != 0))
2662                 return -ENOTSUP;
2663
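        /*
         * Flow director must be enabled, the rule must use the configured
         * fdir mode, and the target Rx queue must exist.
         */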
2664         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2665             fdir_mode != rule->mode)
2666                 return -ENOTSUP;
2667
2668         if (rule->queue >= dev->data->nb_rx_queues)
2669                 return -ENOTSUP;
2670
2671         return ret;
2672 }
2673
2674 void
2675 ixgbe_filterlist_init(void)
2676 {
2677         TAILQ_INIT(&filter_ntuple_list);
2678         TAILQ_INIT(&filter_ethertype_list);
2679         TAILQ_INIT(&filter_syn_list);
2680         TAILQ_INIT(&filter_fdir_list);
2681         TAILQ_INIT(&filter_l2_tunnel_list);
2682         TAILQ_INIT(&ixgbe_flow_list);
2683 }
2684
2685 void
2686 ixgbe_filterlist_flush(void)
2687 {
2688         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2689         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2690         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2691         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2692         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2693         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2694
2695         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2696                 TAILQ_REMOVE(&filter_ntuple_list,
2697                                  ntuple_filter_ptr,
2698                                  entries);
2699                 rte_free(ntuple_filter_ptr);
2700         }
2701
2702         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2703                 TAILQ_REMOVE(&filter_ethertype_list,
2704                                  ethertype_filter_ptr,
2705                                  entries);
2706                 rte_free(ethertype_filter_ptr);
2707         }
2708
2709         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2710                 TAILQ_REMOVE(&filter_syn_list,
2711                                  syn_filter_ptr,
2712                                  entries);
2713                 rte_free(syn_filter_ptr);
2714         }
2715
2716         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2717                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2718                                  l2_tn_filter_ptr,
2719                                  entries);
2720                 rte_free(l2_tn_filter_ptr);
2721         }
2722
2723         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2724                 TAILQ_REMOVE(&filter_fdir_list,
2725                                  fdir_rule_ptr,
2726                                  entries);
2727                 rte_free(fdir_rule_ptr);
2728         }
2729
2730         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2731                 TAILQ_REMOVE(&ixgbe_flow_list,
2732                                  ixgbe_flow_mem_ptr,
2733                                  entries);
2734                 rte_free(ixgbe_flow_mem_ptr->flow);
2735                 rte_free(ixgbe_flow_mem_ptr);
2736         }
2737 }
2738
2739 /**
2740  * Create a flow rule.
2741  * Theoretically one rule can match more than one kind of filter.
2742  * The first filter type whose parser accepts the rule is used,
2743  * so the order in which the parsers below are tried matters.
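 * The parsers below are tried in this order: ntuple, ethertype, SYN,
 * flow director and finally L2 tunnel.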
2744  */
2745 static struct rte_flow *
2746 ixgbe_flow_create(struct rte_eth_dev *dev,
2747                   const struct rte_flow_attr *attr,
2748                   const struct rte_flow_item pattern[],
2749                   const struct rte_flow_action actions[],
2750                   struct rte_flow_error *error)
2751 {
2752         int ret;
2753         struct rte_eth_ntuple_filter ntuple_filter;
2754         struct rte_eth_ethertype_filter ethertype_filter;
2755         struct rte_eth_syn_filter syn_filter;
2756         struct ixgbe_fdir_rule fdir_rule;
2757         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2758         struct ixgbe_hw_fdir_info *fdir_info =
2759                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2760         struct rte_flow *flow = NULL;
2761         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2762         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2763         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2764         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2765         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2766         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2767         uint8_t first_mask = FALSE;
2768
2769         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2770         if (!flow) {
2771                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2772                 return NULL;
2773         }
2774         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2775                         sizeof(struct ixgbe_flow_mem), 0);
2776         if (!ixgbe_flow_mem_ptr) {
2777                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2778                 rte_free(flow);
2779                 return NULL;
2780         }
2781         ixgbe_flow_mem_ptr->flow = flow;
2782         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2783                                 ixgbe_flow_mem_ptr, entries);
2784
2785         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2786         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2787                         actions, &ntuple_filter, error);
2788
2789 #ifdef RTE_LIBRTE_SECURITY
2790         /* An ESP flow is not really a flow; nothing more to program here. */
2791         if (ntuple_filter.proto == IPPROTO_ESP)
2792                 return flow;
2793 #endif
2794
2795         if (!ret) {
2796                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2797                 if (!ret) {
2798                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2799                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2800                         if (!ntuple_filter_ptr) {
2801                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2802                                 goto out;
2803                         }
2804                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2805                                 &ntuple_filter,
2806                                 sizeof(struct rte_eth_ntuple_filter));
2807                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2808                                 ntuple_filter_ptr, entries);
2809                         flow->rule = ntuple_filter_ptr;
2810                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2811                         return flow;
2812                 }
2813                 goto out;
2814         }
2815
2816         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2817         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2818                                 actions, &ethertype_filter, error);
2819         if (!ret) {
2820                 ret = ixgbe_add_del_ethertype_filter(dev,
2821                                 &ethertype_filter, TRUE);
2822                 if (!ret) {
2823                         ethertype_filter_ptr = rte_zmalloc(
2824                                 "ixgbe_ethertype_filter",
2825                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2826                         if (!ethertype_filter_ptr) {
2827                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2828                                 goto out;
2829                         }
2830                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2831                                 &ethertype_filter,
2832                                 sizeof(struct rte_eth_ethertype_filter));
2833                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2834                                 ethertype_filter_ptr, entries);
2835                         flow->rule = ethertype_filter_ptr;
2836                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2837                         return flow;
2838                 }
2839                 goto out;
2840         }
2841
2842         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2843         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2844                                 actions, &syn_filter, error);
2845         if (!ret) {
2846                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2847                 if (!ret) {
2848                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2849                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2850                         if (!syn_filter_ptr) {
2851                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2852                                 goto out;
2853                         }
2854                         rte_memcpy(&syn_filter_ptr->filter_info,
2855                                 &syn_filter,
2856                                 sizeof(struct rte_eth_syn_filter));
2857                         TAILQ_INSERT_TAIL(&filter_syn_list,
2858                                 syn_filter_ptr,
2859                                 entries);
2860                         flow->rule = syn_filter_ptr;
2861                         flow->filter_type = RTE_ETH_FILTER_SYN;
2862                         return flow;
2863                 }
2864                 goto out;
2865         }
2866
2867         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2868         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2869                                 actions, &fdir_rule, error);
2870         if (!ret) {
2871                 /* The fdir mask is global and cannot be deleted once programmed. */
2872                 if (fdir_rule.b_mask) {
2873                         if (!fdir_info->mask_added) {
2874                                 /* It's the first time the mask is set. */
2875                                 rte_memcpy(&fdir_info->mask,
2876                                         &fdir_rule.mask,
2877                                         sizeof(struct ixgbe_hw_fdir_mask));
2878                                 fdir_info->flex_bytes_offset =
2879                                         fdir_rule.flex_bytes_offset;
2880
2881                                 if (fdir_rule.mask.flex_bytes_mask)
2882                                         ixgbe_fdir_set_flexbytes_offset(dev,
2883                                                 fdir_rule.flex_bytes_offset);
2884
2885                                 ret = ixgbe_fdir_set_input_mask(dev);
2886                                 if (ret)
2887                                         goto out;
2888
2889                                 fdir_info->mask_added = TRUE;
2890                                 first_mask = TRUE;
2891                         } else {
2892                                 /**
2893                                  * Only one global mask is supported,
2894                                  * so every rule must use the same mask.
2895                                  */
2896                                 ret = memcmp(&fdir_info->mask,
2897                                         &fdir_rule.mask,
2898                                         sizeof(struct ixgbe_hw_fdir_mask));
2899                                 if (ret)
2900                                         goto out;
2901
2902                                 if (fdir_info->flex_bytes_offset !=
2903                                                 fdir_rule.flex_bytes_offset)
2904                                         goto out;
2905                         }
2906                 }
2907
2908                 if (fdir_rule.b_spec) {
2909                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2910                                         FALSE, FALSE);
2911                         if (!ret) {
2912                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2913                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2914                                 if (!fdir_rule_ptr) {
2915                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2916                                         goto out;
2917                                 }
2918                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2919                                         &fdir_rule,
2920                                         sizeof(struct ixgbe_fdir_rule));
2921                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2922                                         fdir_rule_ptr, entries);
2923                                 flow->rule = fdir_rule_ptr;
2924                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2925
2926                                 return flow;
2927                         }
2928
2929                         if (ret) {
2930                                 /**
2931                                  * Clear the mask_added flag if programming
2932                                  * the filter fails.
2933                                  */
2934                                 if (first_mask)
2935                                         fdir_info->mask_added = FALSE;
2936                                 goto out;
2937                         }
2938                 }
2939
2940                 goto out;
2941         }
2942
2943         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2944         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2945                                         actions, &l2_tn_filter, error);
2946         if (!ret) {
2947                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2948                 if (!ret) {
2949                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2950                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2951                         if (!l2_tn_filter_ptr) {
2952                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2953                                 goto out;
2954                         }
2955                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2956                                 &l2_tn_filter,
2957                                 sizeof(struct rte_eth_l2_tunnel_conf));
2958                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2959                                 l2_tn_filter_ptr, entries);
2960                         flow->rule = l2_tn_filter_ptr;
2961                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2962                         return flow;
2963                 }
2964         }
2965
2966 out:
2967         TAILQ_REMOVE(&ixgbe_flow_list,
2968                 ixgbe_flow_mem_ptr, entries);
2969         rte_flow_error_set(error, -ret,
2970                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2971                            "Failed to create flow.");
2972         rte_free(ixgbe_flow_mem_ptr);
2973         rte_free(flow);
2974         return NULL;
2975 }
2976
2977 /**
2978  * Check if the flow rule is supported by ixgbe.
2979  * It only checks the format; it does not guarantee that the rule can be
2980  * programmed into the HW, e.g. when there is not enough room for it.
2981  */
2982 static int
2983 ixgbe_flow_validate(struct rte_eth_dev *dev,
2984                 const struct rte_flow_attr *attr,
2985                 const struct rte_flow_item pattern[],
2986                 const struct rte_flow_action actions[],
2987                 struct rte_flow_error *error)
2988 {
2989         struct rte_eth_ntuple_filter ntuple_filter;
2990         struct rte_eth_ethertype_filter ethertype_filter;
2991         struct rte_eth_syn_filter syn_filter;
2992         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2993         struct ixgbe_fdir_rule fdir_rule;
2994         int ret;
2995
2996         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2997         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2998                                 actions, &ntuple_filter, error);
2999         if (!ret)
3000                 return 0;
3001
3002         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3003         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3004                                 actions, &ethertype_filter, error);
3005         if (!ret)
3006                 return 0;
3007
3008         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3009         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3010                                 actions, &syn_filter, error);
3011         if (!ret)
3012                 return 0;
3013
3014         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3015         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3016                                 actions, &fdir_rule, error);
3017         if (!ret)
3018                 return 0;
3019
3020         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3021         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3022                                 actions, &l2_tn_filter, error);
3023
3024         return ret;
3025 }
3026
3027 /* Destroy a flow rule on ixgbe. */
3028 static int
3029 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3030                 struct rte_flow *flow,
3031                 struct rte_flow_error *error)
3032 {
3033         int ret;
3034         struct rte_flow *pmd_flow = flow;
3035         enum rte_filter_type filter_type = pmd_flow->filter_type;
3036         struct rte_eth_ntuple_filter ntuple_filter;
3037         struct rte_eth_ethertype_filter ethertype_filter;
3038         struct rte_eth_syn_filter syn_filter;
3039         struct ixgbe_fdir_rule fdir_rule;
3040         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3041         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3042         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3043         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3044         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3045         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3046         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3047         struct ixgbe_hw_fdir_info *fdir_info =
3048                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3049
3050         switch (filter_type) {
3051         case RTE_ETH_FILTER_NTUPLE:
3052                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3053                                         pmd_flow->rule;
3054                 rte_memcpy(&ntuple_filter,
3055                         &ntuple_filter_ptr->filter_info,
3056                         sizeof(struct rte_eth_ntuple_filter));
3057                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3058                 if (!ret) {
3059                         TAILQ_REMOVE(&filter_ntuple_list,
3060                         ntuple_filter_ptr, entries);
3061                         rte_free(ntuple_filter_ptr);
3062                 }
3063                 break;
3064         case RTE_ETH_FILTER_ETHERTYPE:
3065                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3066                                         pmd_flow->rule;
3067                 rte_memcpy(&ethertype_filter,
3068                         &ethertype_filter_ptr->filter_info,
3069                         sizeof(struct rte_eth_ethertype_filter));
3070                 ret = ixgbe_add_del_ethertype_filter(dev,
3071                                 &ethertype_filter, FALSE);
3072                 if (!ret) {
3073                         TAILQ_REMOVE(&filter_ethertype_list,
3074                                 ethertype_filter_ptr, entries);
3075                         rte_free(ethertype_filter_ptr);
3076                 }
3077                 break;
3078         case RTE_ETH_FILTER_SYN:
3079                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3080                                 pmd_flow->rule;
3081                 rte_memcpy(&syn_filter,
3082                         &syn_filter_ptr->filter_info,
3083                         sizeof(struct rte_eth_syn_filter));
3084                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3085                 if (!ret) {
3086                         TAILQ_REMOVE(&filter_syn_list,
3087                                 syn_filter_ptr, entries);
3088                         rte_free(syn_filter_ptr);
3089                 }
3090                 break;
3091         case RTE_ETH_FILTER_FDIR:
3092                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3093                 rte_memcpy(&fdir_rule,
3094                         &fdir_rule_ptr->filter_info,
3095                         sizeof(struct ixgbe_fdir_rule));
3096                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3097                 if (!ret) {
3098                         TAILQ_REMOVE(&filter_fdir_list,
3099                                 fdir_rule_ptr, entries);
3100                         rte_free(fdir_rule_ptr);
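                        /*
                         * Once the last fdir rule is gone, allow a new global
                         * mask to be programmed by the next rule.
                         */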
3101                         if (TAILQ_EMPTY(&filter_fdir_list))
3102                                 fdir_info->mask_added = false;
3103                 }
3104                 break;
3105         case RTE_ETH_FILTER_L2_TUNNEL:
3106                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3107                                 pmd_flow->rule;
3108                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3109                         sizeof(struct rte_eth_l2_tunnel_conf));
3110                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3111                 if (!ret) {
3112                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3113                                 l2_tn_filter_ptr, entries);
3114                         rte_free(l2_tn_filter_ptr);
3115                 }
3116                 break;
3117         default:
3118                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3119                             filter_type);
3120                 ret = -EINVAL;
3121                 break;
3122         }
3123
3124         if (ret) {
3125                 rte_flow_error_set(error, EINVAL,
3126                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3127                                 NULL, "Failed to destroy flow");
3128                 return ret;
3129         }
3130
3131         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3132                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3133                         TAILQ_REMOVE(&ixgbe_flow_list,
3134                                 ixgbe_flow_mem_ptr, entries);
3135                         rte_free(ixgbe_flow_mem_ptr);
3136                 }
3137         }
3138         rte_free(flow);
3139
3140         return ret;
3141 }
3142
3143 /* Destroy all flow rules associated with a port on ixgbe. */
3144 static int
3145 ixgbe_flow_flush(struct rte_eth_dev *dev,
3146                 struct rte_flow_error *error)
3147 {
3148         int ret = 0;
3149
3150         ixgbe_clear_all_ntuple_filter(dev);
3151         ixgbe_clear_all_ethertype_filter(dev);
3152         ixgbe_clear_syn_filter(dev);
3153
3154         ret = ixgbe_clear_all_fdir_filter(dev);
3155         if (ret < 0) {
3156                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3157                                         NULL, "Failed to flush rule");
3158                 return ret;
3159         }
3160
3161         ret = ixgbe_clear_all_l2_tn_filter(dev);
3162         if (ret < 0) {
3163                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3164                                         NULL, "Failed to flush rule");
3165                 return ret;
3166         }
3167
3168         ixgbe_filterlist_flush();
3169
3170         return 0;
3171 }
3172
3173 const struct rte_flow_ops ixgbe_flow_ops = {
3174         .validate = ixgbe_flow_validate,
3175         .create = ixgbe_flow_create,
3176         .destroy = ixgbe_flow_destroy,
3177         .flush = ixgbe_flow_flush,
3178 };
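
/*
 * Illustrative sketch (not part of the driver): how an application reaches
 * these callbacks through the generic rte_flow API once the port is bound to
 * this PMD. port_id, attr, pattern and actions are placeholders assumed to be
 * set up by the caller.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *flow;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *             if (flow != NULL)
 *                     rte_flow_destroy(port_id, flow, &err);
 *     }
 *     rte_flow_flush(port_id, &err);
 */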