net/ixgbe: remove void pointer cast
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
31 #include <rte_dev.h>
32 #include <rte_hash_crc.h>
33 #include <rte_flow.h>
34 #include <rte_flow_driver.h>
35
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct rte_eth_l2_tunnel_conf filter_info;
76 };
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79         TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80         struct ixgbe_rte_flow_rss_conf filter_info;
81 };
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84         TAILQ_ENTRY(ixgbe_flow_mem) entries;
85         struct rte_flow *flow;
86 };
87
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
95
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
103
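
/**
 * Usage sketch (illustrative only, not driver code): once a rule has been
 * parsed and programmed, it is wrapped in one of the list elements above and
 * appended to the matching list so it can be destroyed or flushed later;
 * ixgbe_flow_create() further down in this file follows roughly this shape.
 * "ntuple_filter" below is only a placeholder name for a parsed
 * struct rte_eth_ntuple_filter.
 *
 *   struct ixgbe_ntuple_filter_ele *ele =
 *       rte_zmalloc("ixgbe_ntuple_filter",
 *                   sizeof(struct ixgbe_ntuple_filter_ele), 0);
 *   if (ele) {
 *       rte_memcpy(&ele->filter_info, &ntuple_filter,
 *                  sizeof(struct rte_eth_ntuple_filter));
 *       TAILQ_INSERT_TAIL(&filter_ntuple_list, ele, entries);
 *   }
 */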
104 /**
105  * An endless loop cannot happen given the assumptions below:
106  * 1. there is at least one non-void item (END),
107  * 2. cur is before END.
108  */
109 static inline
110 const struct rte_flow_item *next_no_void_pattern(
111                 const struct rte_flow_item pattern[],
112                 const struct rte_flow_item *cur)
113 {
114         const struct rte_flow_item *next =
115                 cur ? cur + 1 : &pattern[0];
116         while (1) {
117                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
118                         return next;
119                 next++;
120         }
121 }
122
123 static inline
124 const struct rte_flow_action *next_no_void_action(
125                 const struct rte_flow_action actions[],
126                 const struct rte_flow_action *cur)
127 {
128         const struct rte_flow_action *next =
129                 cur ? cur + 1 : &actions[0];
130         while (1) {
131                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
132                         return next;
133                 next++;
134         }
135 }
136
137 /**
138  * Please be aware there's an assumption for all the parsers:
139  * rte_flow_item uses big endian, while rte_flow_attr and
140  * rte_flow_action use CPU order.
141  * Because the pattern is used to describe packets,
142  * the packets normally use network order.
143  */
144
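/**
 * Illustrative sketch (application side, not driver code) of the byte
 * ordering the parsers below expect: item spec fields are converted to
 * network order by the caller, while rte_flow_attr stays in CPU order.
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_tcp tcp_spec = {
 *       .hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *   };
 */
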
145 /**
146  * Parse the rule to see if it is an n-tuple rule.
147  * And get the n-tuple filter info as well.
148  * pattern:
149  * The first not void item can be ETH or IPV4.
150  * The second not void item must be IPV4 if the first one is ETH.
151  * The third not void item must be UDP or TCP.
152  * The next not void item must be END.
153  * action:
154  * The first not void action should be QUEUE.
155  * The next not void action should be END.
156  * pattern example:
157  * ITEM         Spec                    Mask
158  * ETH          NULL                    NULL
159  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
160  *              dst_addr 192.167.3.50   0xFFFFFFFF
161  *              next_proto_id   17      0xFF
162  * UDP/TCP/     src_port        80      0xFFFF
163  * SCTP         dst_port        80      0xFFFF
164  * END
165  * other members in mask and spec should be set to 0x00.
166  * item->last should be NULL.
167  *
168  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
169  *
170  */
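/**
 * Illustrative application-side sketch of a rule this parser accepts
 * (addresses and ports follow the example table above, 0xC0A80114 being
 * 192.168.1.20 and 0xC0A70332 being 192.167.3.50; the queue index is an
 * arbitrary placeholder):
 *
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *       .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *       .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *       .next_proto_id = IPPROTO_UDP } };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *       .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *       .next_proto_id = UINT8_MAX } };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *       .src_port = rte_cpu_to_be_16(80),
 *       .dst_port = rte_cpu_to_be_16(80) } };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *       .src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *         .spec = &ip_spec, .mask = &ip_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *         .spec = &udp_spec, .mask = &udp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
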
171 static int
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173                          const struct rte_flow_item pattern[],
174                          const struct rte_flow_action actions[],
175                          struct rte_eth_ntuple_filter *filter,
176                          struct rte_flow_error *error)
177 {
178         const struct rte_flow_item *item;
179         const struct rte_flow_action *act;
180         const struct rte_flow_item_ipv4 *ipv4_spec;
181         const struct rte_flow_item_ipv4 *ipv4_mask;
182         const struct rte_flow_item_tcp *tcp_spec;
183         const struct rte_flow_item_tcp *tcp_mask;
184         const struct rte_flow_item_udp *udp_spec;
185         const struct rte_flow_item_udp *udp_mask;
186         const struct rte_flow_item_sctp *sctp_spec;
187         const struct rte_flow_item_sctp *sctp_mask;
188         const struct rte_flow_item_eth *eth_spec;
189         const struct rte_flow_item_eth *eth_mask;
190         const struct rte_flow_item_vlan *vlan_spec;
191         const struct rte_flow_item_vlan *vlan_mask;
192         struct rte_flow_item_eth eth_null;
193         struct rte_flow_item_vlan vlan_null;
194
195         if (!pattern) {
196                 rte_flow_error_set(error,
197                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198                         NULL, "NULL pattern.");
199                 return -rte_errno;
200         }
201
202         if (!actions) {
203                 rte_flow_error_set(error, EINVAL,
204                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205                                    NULL, "NULL action.");
206                 return -rte_errno;
207         }
208         if (!attr) {
209                 rte_flow_error_set(error, EINVAL,
210                                    RTE_FLOW_ERROR_TYPE_ATTR,
211                                    NULL, "NULL attribute.");
212                 return -rte_errno;
213         }
214
215         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
216         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
217
218 #ifdef RTE_LIBRTE_SECURITY
219         /**
220          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
221          */
222         act = next_no_void_action(actions, NULL);
223         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
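                /*
                 * Remember the SECURITY action conf before advancing the
                 * action iterator to check that the next non-void action
                 * is END.
                 */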
224                 const void *conf = act->conf;
225                 /* check if the next not void item is END */
226                 act = next_no_void_action(actions, act);
227                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229                         rte_flow_error_set(error, EINVAL,
230                                 RTE_FLOW_ERROR_TYPE_ACTION,
231                                 act, "Not supported action.");
232                         return -rte_errno;
233                 }
234
235                 /* get the IP pattern*/
236                 item = next_no_void_pattern(pattern, NULL);
237                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
239                         if (item->last ||
240                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
241                                 rte_flow_error_set(error, EINVAL,
242                                         RTE_FLOW_ERROR_TYPE_ITEM,
243                                         item, "IP pattern missing.");
244                                 return -rte_errno;
245                         }
246                         item = next_no_void_pattern(pattern, item);
247                 }
248
249                 filter->proto = IPPROTO_ESP;
250                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
252         }
253 #endif
254
255         /* the first not void item can be MAC or IPv4 */
256         item = next_no_void_pattern(pattern, NULL);
257
258         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260                 rte_flow_error_set(error, EINVAL,
261                         RTE_FLOW_ERROR_TYPE_ITEM,
262                         item, "Not supported by ntuple filter");
263                 return -rte_errno;
264         }
265         /* Skip Ethernet */
266         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267                 eth_spec = item->spec;
268                 eth_mask = item->mask;
269                 /*Not supported last point for range*/
270                 if (item->last) {
271                         rte_flow_error_set(error,
272                           EINVAL,
273                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                           item, "Not supported last point for range");
275                         return -rte_errno;
276
277                 }
278                 /* if the first item is MAC, the content should be NULL */
279                 if ((item->spec || item->mask) &&
280                         (memcmp(eth_spec, &eth_null,
281                                 sizeof(struct rte_flow_item_eth)) ||
282                          memcmp(eth_mask, &eth_null,
283                                 sizeof(struct rte_flow_item_eth)))) {
284                         rte_flow_error_set(error, EINVAL,
285                                 RTE_FLOW_ERROR_TYPE_ITEM,
286                                 item, "Not supported by ntuple filter");
287                         return -rte_errno;
288                 }
289                 /* check if the next not void item is IPv4 or Vlan */
290                 item = next_no_void_pattern(pattern, item);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293                         rte_flow_error_set(error,
294                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295                           item, "Not supported by ntuple filter");
296                           return -rte_errno;
297                 }
298         }
299
300         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301                 vlan_spec = item->spec;
302                 vlan_mask = item->mask;
303                 /*Not supported last point for range*/
304                 if (item->last) {
305                         rte_flow_error_set(error,
306                           EINVAL,
307                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308                           item, "Not supported last point for range");
309                         return -rte_errno;
310                 }
311                 /* the content should be NULL */
312                 if ((item->spec || item->mask) &&
313                         (memcmp(vlan_spec, &vlan_null,
314                                 sizeof(struct rte_flow_item_vlan)) ||
315                          memcmp(vlan_mask, &vlan_null,
316                                 sizeof(struct rte_flow_item_vlan)))) {
317
318                         rte_flow_error_set(error, EINVAL,
319                                 RTE_FLOW_ERROR_TYPE_ITEM,
320                                 item, "Not supported by ntuple filter");
321                         return -rte_errno;
322                 }
323                 /* check if the next not void item is IPv4 */
324                 item = next_no_void_pattern(pattern, item);
325                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326                         rte_flow_error_set(error,
327                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328                           item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331         }
332
333         if (item->mask) {
334                 /* get the IPv4 info */
335                 if (!item->spec || !item->mask) {
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Invalid ntuple mask");
339                         return -rte_errno;
340                 }
341                 /*Not supported last point for range*/
342                 if (item->last) {
343                         rte_flow_error_set(error, EINVAL,
344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345                                 item, "Not supported last point for range");
346                         return -rte_errno;
347                 }
348
349                 ipv4_mask = item->mask;
350                 /**
351                  * Only support src & dst addresses, protocol,
352                  * others should be masked.
353                  */
354                 if (ipv4_mask->hdr.version_ihl ||
355                     ipv4_mask->hdr.type_of_service ||
356                     ipv4_mask->hdr.total_length ||
357                     ipv4_mask->hdr.packet_id ||
358                     ipv4_mask->hdr.fragment_offset ||
359                     ipv4_mask->hdr.time_to_live ||
360                     ipv4_mask->hdr.hdr_checksum) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366
367                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
368                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
369                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
370
371                 ipv4_spec = item->spec;
372                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
373                 filter->src_ip = ipv4_spec->hdr.src_addr;
374                 filter->proto  = ipv4_spec->hdr.next_proto_id;
375         }
376
377         /* check if the next not void item is TCP or UDP */
378         item = next_no_void_pattern(pattern, item);
379         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
380             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
381             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
382             item->type != RTE_FLOW_ITEM_TYPE_END) {
383                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
384                 rte_flow_error_set(error, EINVAL,
385                         RTE_FLOW_ERROR_TYPE_ITEM,
386                         item, "Not supported by ntuple filter");
387                 return -rte_errno;
388         }
389
390         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
391                 (!item->spec && !item->mask)) {
392                 goto action;
393         }
394
395         /* get the TCP/UDP/SCTP info */
396         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
397                 (!item->spec || !item->mask)) {
398                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
399                 rte_flow_error_set(error, EINVAL,
400                         RTE_FLOW_ERROR_TYPE_ITEM,
401                         item, "Invalid ntuple mask");
402                 return -rte_errno;
403         }
404
405         /*Not supported last point for range*/
406         if (item->last) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
410                         item, "Not supported last point for range");
411                 return -rte_errno;
412
413         }
414
415         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
416                 tcp_mask = item->mask;
417
418                 /**
419                  * Only support src & dst ports, tcp flags,
420                  * others should be masked.
421                  */
422                 if (tcp_mask->hdr.sent_seq ||
423                     tcp_mask->hdr.recv_ack ||
424                     tcp_mask->hdr.data_off ||
425                     tcp_mask->hdr.rx_win ||
426                     tcp_mask->hdr.cksum ||
427                     tcp_mask->hdr.tcp_urp) {
428                         memset(filter, 0,
429                                 sizeof(struct rte_eth_ntuple_filter));
430                         rte_flow_error_set(error, EINVAL,
431                                 RTE_FLOW_ERROR_TYPE_ITEM,
432                                 item, "Not supported by ntuple filter");
433                         return -rte_errno;
434                 }
435
436                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
437                 filter->src_port_mask  = tcp_mask->hdr.src_port;
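                /*
                 * A fully-masked tcp_flags requests an exact flag match;
                 * note that ixgbe_parse_ntuple_filter() below rejects
                 * RTE_NTUPLE_FLAGS_TCP_FLAG, so only an all-zero flag
                 * mask is usable on this device.
                 */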
438                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
439                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
440                 } else if (!tcp_mask->hdr.tcp_flags) {
441                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
442                 } else {
443                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
444                         rte_flow_error_set(error, EINVAL,
445                                 RTE_FLOW_ERROR_TYPE_ITEM,
446                                 item, "Not supported by ntuple filter");
447                         return -rte_errno;
448                 }
449
450                 tcp_spec = item->spec;
451                 filter->dst_port  = tcp_spec->hdr.dst_port;
452                 filter->src_port  = tcp_spec->hdr.src_port;
453                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
454         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
455                 udp_mask = item->mask;
456
457                 /**
458                  * Only support src & dst ports,
459                  * others should be masked.
460                  */
461                 if (udp_mask->hdr.dgram_len ||
462                     udp_mask->hdr.dgram_cksum) {
463                         memset(filter, 0,
464                                 sizeof(struct rte_eth_ntuple_filter));
465                         rte_flow_error_set(error, EINVAL,
466                                 RTE_FLOW_ERROR_TYPE_ITEM,
467                                 item, "Not supported by ntuple filter");
468                         return -rte_errno;
469                 }
470
471                 filter->dst_port_mask = udp_mask->hdr.dst_port;
472                 filter->src_port_mask = udp_mask->hdr.src_port;
473
474                 udp_spec = item->spec;
475                 filter->dst_port = udp_spec->hdr.dst_port;
476                 filter->src_port = udp_spec->hdr.src_port;
477         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
478                 sctp_mask = item->mask;
479
480                 /**
481                  * Only support src & dst ports,
482                  * others should be masked.
483                  */
484                 if (sctp_mask->hdr.tag ||
485                     sctp_mask->hdr.cksum) {
486                         memset(filter, 0,
487                                 sizeof(struct rte_eth_ntuple_filter));
488                         rte_flow_error_set(error, EINVAL,
489                                 RTE_FLOW_ERROR_TYPE_ITEM,
490                                 item, "Not supported by ntuple filter");
491                         return -rte_errno;
492                 }
493
494                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
495                 filter->src_port_mask = sctp_mask->hdr.src_port;
496
497                 sctp_spec = item->spec;
498                 filter->dst_port = sctp_spec->hdr.dst_port;
499                 filter->src_port = sctp_spec->hdr.src_port;
500         } else {
501                 goto action;
502         }
503
504         /* check if the next not void item is END */
505         item = next_no_void_pattern(pattern, item);
506         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
507                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
508                 rte_flow_error_set(error, EINVAL,
509                         RTE_FLOW_ERROR_TYPE_ITEM,
510                         item, "Not supported by ntuple filter");
511                 return -rte_errno;
512         }
513
514 action:
515
516         /**
517          * n-tuple only supports forwarding,
518          * check if the first not void action is QUEUE.
519          */
520         act = next_no_void_action(actions, NULL);
521         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
522                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
523                 rte_flow_error_set(error, EINVAL,
524                         RTE_FLOW_ERROR_TYPE_ACTION,
525                         item, "Not supported action.");
526                 return -rte_errno;
527         }
528         filter->queue =
529                 ((const struct rte_flow_action_queue *)act->conf)->index;
530
531         /* check if the next not void item is END */
532         act = next_no_void_action(actions, act);
533         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
534                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
535                 rte_flow_error_set(error, EINVAL,
536                         RTE_FLOW_ERROR_TYPE_ACTION,
537                         act, "Not supported action.");
538                 return -rte_errno;
539         }
540
541         /* parse attr */
542         /* must be input direction */
543         if (!attr->ingress) {
544                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
545                 rte_flow_error_set(error, EINVAL,
546                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
547                                    attr, "Only support ingress.");
548                 return -rte_errno;
549         }
550
551         /* not supported */
552         if (attr->egress) {
553                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
554                 rte_flow_error_set(error, EINVAL,
555                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
556                                    attr, "Not support egress.");
557                 return -rte_errno;
558         }
559
560         if (attr->priority > 0xFFFF) {
561                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
562                 rte_flow_error_set(error, EINVAL,
563                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
564                                    attr, "Error priority.");
565                 return -rte_errno;
566         }
567         filter->priority = (uint16_t)attr->priority;
568         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
569             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
570             filter->priority = 1;
571
572         return 0;
573 }
574
575 /* an ixgbe-specific function because the supported flags are device-specific */
576 static int
577 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
578                           const struct rte_flow_attr *attr,
579                           const struct rte_flow_item pattern[],
580                           const struct rte_flow_action actions[],
581                           struct rte_eth_ntuple_filter *filter,
582                           struct rte_flow_error *error)
583 {
584         int ret;
585         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
586
587         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
588
589         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
590
591         if (ret)
592                 return ret;
593
594 #ifdef RTE_LIBRTE_SECURITY
595         /* An ESP flow is not really a flow */
596         if (filter->proto == IPPROTO_ESP)
597                 return 0;
598 #endif
599
600         /* Ixgbe doesn't support tcp flags. */
601         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
602                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
603                 rte_flow_error_set(error, EINVAL,
604                                    RTE_FLOW_ERROR_TYPE_ITEM,
605                                    NULL, "Not supported by ntuple filter");
606                 return -rte_errno;
607         }
608
609         /* Ixgbe doesn't support many priorities. */
610         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
611             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
612                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
613                 rte_flow_error_set(error, EINVAL,
614                         RTE_FLOW_ERROR_TYPE_ITEM,
615                         NULL, "Priority not supported by ntuple filter");
616                 return -rte_errno;
617         }
618
619         if (filter->queue >= dev->data->nb_rx_queues)
620                 return -rte_errno;
621
622         /* fixed value for ixgbe */
623         filter->flags = RTE_5TUPLE_FLAGS;
624         return 0;
625 }
626
627 /**
628  * Parse the rule to see if it is an ethertype rule.
629  * And get the ethertype filter info as well.
630  * pattern:
631  * The first not void item can be ETH.
632  * The next not void item must be END.
633  * action:
634  * The first not void action should be QUEUE.
635  * The next not void action should be END.
636  * pattern example:
637  * ITEM         Spec                    Mask
638  * ETH          type    0x0807          0xFFFF
639  * END
640  * other members in mask and spec should be set to 0x00.
641  * item->last should be NULL.
642  */
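/**
 * Illustrative application-side sketch of a rule this parser accepts
 * (the ether type and masks follow the example above; the queue index is
 * an arbitrary placeholder):
 *
 *   struct rte_flow_item_eth eth_spec = {
 *       .type = rte_cpu_to_be_16(0x0807) };
 *   struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *         .spec = &eth_spec, .mask = &eth_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
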
643 static int
644 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
645                             const struct rte_flow_item *pattern,
646                             const struct rte_flow_action *actions,
647                             struct rte_eth_ethertype_filter *filter,
648                             struct rte_flow_error *error)
649 {
650         const struct rte_flow_item *item;
651         const struct rte_flow_action *act;
652         const struct rte_flow_item_eth *eth_spec;
653         const struct rte_flow_item_eth *eth_mask;
654         const struct rte_flow_action_queue *act_q;
655
656         if (!pattern) {
657                 rte_flow_error_set(error, EINVAL,
658                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
659                                 NULL, "NULL pattern.");
660                 return -rte_errno;
661         }
662
663         if (!actions) {
664                 rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
666                                 NULL, "NULL action.");
667                 return -rte_errno;
668         }
669
670         if (!attr) {
671                 rte_flow_error_set(error, EINVAL,
672                                    RTE_FLOW_ERROR_TYPE_ATTR,
673                                    NULL, "NULL attribute.");
674                 return -rte_errno;
675         }
676
677         item = next_no_void_pattern(pattern, NULL);
678         /* The first non-void item should be MAC. */
679         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
680                 rte_flow_error_set(error, EINVAL,
681                         RTE_FLOW_ERROR_TYPE_ITEM,
682                         item, "Not supported by ethertype filter");
683                 return -rte_errno;
684         }
685
686         /*Not supported last point for range*/
687         if (item->last) {
688                 rte_flow_error_set(error, EINVAL,
689                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
690                         item, "Not supported last point for range");
691                 return -rte_errno;
692         }
693
694         /* Get the MAC info. */
695         if (!item->spec || !item->mask) {
696                 rte_flow_error_set(error, EINVAL,
697                                 RTE_FLOW_ERROR_TYPE_ITEM,
698                                 item, "Not supported by ethertype filter");
699                 return -rte_errno;
700         }
701
702         eth_spec = item->spec;
703         eth_mask = item->mask;
704
705         /* Mask bits of source MAC address must be full of 0.
706          * Mask bits of destination MAC address must be full
707          * of 1 or full of 0.
708          */
709         if (!is_zero_ether_addr(&eth_mask->src) ||
710             (!is_zero_ether_addr(&eth_mask->dst) &&
711              !is_broadcast_ether_addr(&eth_mask->dst))) {
712                 rte_flow_error_set(error, EINVAL,
713                                 RTE_FLOW_ERROR_TYPE_ITEM,
714                                 item, "Invalid ether address mask");
715                 return -rte_errno;
716         }
717
718         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
719                 rte_flow_error_set(error, EINVAL,
720                                 RTE_FLOW_ERROR_TYPE_ITEM,
721                                 item, "Invalid ethertype mask");
722                 return -rte_errno;
723         }
724
725         /* If mask bits of destination MAC address
726          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
727          */
728         if (is_broadcast_ether_addr(&eth_mask->dst)) {
729                 filter->mac_addr = eth_spec->dst;
730                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
731         } else {
732                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
733         }
734         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
735
736         /* Check if the next non-void item is END. */
737         item = next_no_void_pattern(pattern, item);
738         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
739                 rte_flow_error_set(error, EINVAL,
740                                 RTE_FLOW_ERROR_TYPE_ITEM,
741                                 item, "Not supported by ethertype filter.");
742                 return -rte_errno;
743         }
744
745         /* Parse action */
746
747         act = next_no_void_action(actions, NULL);
748         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
749             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ACTION,
752                                 act, "Not supported action.");
753                 return -rte_errno;
754         }
755
756         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
757                 act_q = (const struct rte_flow_action_queue *)act->conf;
758                 filter->queue = act_q->index;
759         } else {
760                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
761         }
762
763         /* Check if the next non-void item is END */
764         act = next_no_void_action(actions, act);
765         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
766                 rte_flow_error_set(error, EINVAL,
767                                 RTE_FLOW_ERROR_TYPE_ACTION,
768                                 act, "Not supported action.");
769                 return -rte_errno;
770         }
771
772         /* Parse attr */
773         /* Must be input direction */
774         if (!attr->ingress) {
775                 rte_flow_error_set(error, EINVAL,
776                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
777                                 attr, "Only support ingress.");
778                 return -rte_errno;
779         }
780
781         /* Not supported */
782         if (attr->egress) {
783                 rte_flow_error_set(error, EINVAL,
784                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
785                                 attr, "Not support egress.");
786                 return -rte_errno;
787         }
788
789         /* Not supported */
790         if (attr->priority) {
791                 rte_flow_error_set(error, EINVAL,
792                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
793                                 attr, "Not support priority.");
794                 return -rte_errno;
795         }
796
797         /* Not supported */
798         if (attr->group) {
799                 rte_flow_error_set(error, EINVAL,
800                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
801                                 attr, "Not support group.");
802                 return -rte_errno;
803         }
804
805         return 0;
806 }
807
808 static int
809 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
810                                  const struct rte_flow_attr *attr,
811                              const struct rte_flow_item pattern[],
812                              const struct rte_flow_action actions[],
813                              struct rte_eth_ethertype_filter *filter,
814                              struct rte_flow_error *error)
815 {
816         int ret;
817         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
818
819         MAC_TYPE_FILTER_SUP(hw->mac.type);
820
821         ret = cons_parse_ethertype_filter(attr, pattern,
822                                         actions, filter, error);
823
824         if (ret)
825                 return ret;
826
827         /* Ixgbe doesn't support MAC address. */
828         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
829                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830                 rte_flow_error_set(error, EINVAL,
831                         RTE_FLOW_ERROR_TYPE_ITEM,
832                         NULL, "Not supported by ethertype filter");
833                 return -rte_errno;
834         }
835
836         if (filter->queue >= dev->data->nb_rx_queues) {
837                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
838                 rte_flow_error_set(error, EINVAL,
839                         RTE_FLOW_ERROR_TYPE_ITEM,
840                         NULL, "queue index much too big");
841                 return -rte_errno;
842         }
843
844         if (filter->ether_type == ETHER_TYPE_IPv4 ||
845                 filter->ether_type == ETHER_TYPE_IPv6) {
846                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
847                 rte_flow_error_set(error, EINVAL,
848                         RTE_FLOW_ERROR_TYPE_ITEM,
849                         NULL, "IPv4/IPv6 not supported by ethertype filter");
850                 return -rte_errno;
851         }
852
853         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
854                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
855                 rte_flow_error_set(error, EINVAL,
856                         RTE_FLOW_ERROR_TYPE_ITEM,
857                         NULL, "mac compare is unsupported");
858                 return -rte_errno;
859         }
860
861         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
862                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
863                 rte_flow_error_set(error, EINVAL,
864                         RTE_FLOW_ERROR_TYPE_ITEM,
865                         NULL, "drop option is unsupported");
866                 return -rte_errno;
867         }
868
869         return 0;
870 }
871
872 /**
873  * Parse the rule to see if it is a TCP SYN rule.
874  * And get the TCP SYN filter info as well.
875  * pattern:
876  * The first not void item must be ETH.
877  * The second not void item must be IPV4 or IPV6.
878  * The third not void item must be TCP.
879  * The next not void item must be END.
880  * action:
881  * The first not void action should be QUEUE.
882  * The next not void action should be END.
883  * pattern example:
884  * ITEM         Spec                    Mask
885  * ETH          NULL                    NULL
886  * IPV4/IPV6    NULL                    NULL
887  * TCP          tcp_flags       0x02    0x02
888  * END
889  * other members in mask and spec should be set to 0x00.
890  * item->last should be NULL.
891  */
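/**
 * Illustrative application-side sketch of a rule this parser accepts
 * (ETH and IPv4 items carry no spec/mask; the parser below requires the
 * tcp_flags mask to be exactly the SYN bit; the queue index is an
 * arbitrary placeholder):
 *
 *   struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
 *   struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *         .spec = &tcp_spec, .mask = &tcp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
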
892 static int
893 cons_parse_syn_filter(const struct rte_flow_attr *attr,
894                                 const struct rte_flow_item pattern[],
895                                 const struct rte_flow_action actions[],
896                                 struct rte_eth_syn_filter *filter,
897                                 struct rte_flow_error *error)
898 {
899         const struct rte_flow_item *item;
900         const struct rte_flow_action *act;
901         const struct rte_flow_item_tcp *tcp_spec;
902         const struct rte_flow_item_tcp *tcp_mask;
903         const struct rte_flow_action_queue *act_q;
904
905         if (!pattern) {
906                 rte_flow_error_set(error, EINVAL,
907                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
908                                 NULL, "NULL pattern.");
909                 return -rte_errno;
910         }
911
912         if (!actions) {
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
915                                 NULL, "NULL action.");
916                 return -rte_errno;
917         }
918
919         if (!attr) {
920                 rte_flow_error_set(error, EINVAL,
921                                    RTE_FLOW_ERROR_TYPE_ATTR,
922                                    NULL, "NULL attribute.");
923                 return -rte_errno;
924         }
925
926
927         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
928         item = next_no_void_pattern(pattern, NULL);
929         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
930             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
931             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
932             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
933                 rte_flow_error_set(error, EINVAL,
934                                 RTE_FLOW_ERROR_TYPE_ITEM,
935                                 item, "Not supported by syn filter");
936                 return -rte_errno;
937         }
938         /*Not supported last point for range*/
939         if (item->last) {
940                 rte_flow_error_set(error, EINVAL,
941                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
942                         item, "Not supported last point for range");
943                 return -rte_errno;
944         }
945
946         /* Skip Ethernet */
947         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
948                 /* if the item is MAC, the content should be NULL */
949                 if (item->spec || item->mask) {
950                         rte_flow_error_set(error, EINVAL,
951                                 RTE_FLOW_ERROR_TYPE_ITEM,
952                                 item, "Invalid SYN address mask");
953                         return -rte_errno;
954                 }
955
956                 /* check if the next not void item is IPv4 or IPv6 */
957                 item = next_no_void_pattern(pattern, item);
958                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
959                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
960                         rte_flow_error_set(error, EINVAL,
961                                 RTE_FLOW_ERROR_TYPE_ITEM,
962                                 item, "Not supported by syn filter");
963                         return -rte_errno;
964                 }
965         }
966
967         /* Skip IP */
968         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
969             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
970                 /* if the item is IP, the content should be NULL */
971                 if (item->spec || item->mask) {
972                         rte_flow_error_set(error, EINVAL,
973                                 RTE_FLOW_ERROR_TYPE_ITEM,
974                                 item, "Invalid SYN mask");
975                         return -rte_errno;
976                 }
977
978                 /* check if the next not void item is TCP */
979                 item = next_no_void_pattern(pattern, item);
980                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
981                         rte_flow_error_set(error, EINVAL,
982                                 RTE_FLOW_ERROR_TYPE_ITEM,
983                                 item, "Not supported by syn filter");
984                         return -rte_errno;
985                 }
986         }
987
988         /* Get the TCP info. Only support SYN. */
989         if (!item->spec || !item->mask) {
990                 rte_flow_error_set(error, EINVAL,
991                                 RTE_FLOW_ERROR_TYPE_ITEM,
992                                 item, "Invalid SYN mask");
993                 return -rte_errno;
994         }
995         /*Not supported last point for range*/
996         if (item->last) {
997                 rte_flow_error_set(error, EINVAL,
998                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
999                         item, "Not supported last point for range");
1000                 return -rte_errno;
1001         }
1002
1003         tcp_spec = item->spec;
1004         tcp_mask = item->mask;
1005         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
1006             tcp_mask->hdr.src_port ||
1007             tcp_mask->hdr.dst_port ||
1008             tcp_mask->hdr.sent_seq ||
1009             tcp_mask->hdr.recv_ack ||
1010             tcp_mask->hdr.data_off ||
1011             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
1012             tcp_mask->hdr.rx_win ||
1013             tcp_mask->hdr.cksum ||
1014             tcp_mask->hdr.tcp_urp) {
1015                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1016                 rte_flow_error_set(error, EINVAL,
1017                                 RTE_FLOW_ERROR_TYPE_ITEM,
1018                                 item, "Not supported by syn filter");
1019                 return -rte_errno;
1020         }
1021
1022         /* check if the next not void item is END */
1023         item = next_no_void_pattern(pattern, item);
1024         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1025                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1026                 rte_flow_error_set(error, EINVAL,
1027                                 RTE_FLOW_ERROR_TYPE_ITEM,
1028                                 item, "Not supported by syn filter");
1029                 return -rte_errno;
1030         }
1031
1032         /* check if the first not void action is QUEUE. */
1033         act = next_no_void_action(actions, NULL);
1034         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1035                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1036                 rte_flow_error_set(error, EINVAL,
1037                                 RTE_FLOW_ERROR_TYPE_ACTION,
1038                                 act, "Not supported action.");
1039                 return -rte_errno;
1040         }
1041
1042         act_q = (const struct rte_flow_action_queue *)act->conf;
1043         filter->queue = act_q->index;
1044         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1045                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1046                 rte_flow_error_set(error, EINVAL,
1047                                 RTE_FLOW_ERROR_TYPE_ACTION,
1048                                 act, "Not supported action.");
1049                 return -rte_errno;
1050         }
1051
1052         /* check if the next not void item is END */
1053         act = next_no_void_action(actions, act);
1054         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1055                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1056                 rte_flow_error_set(error, EINVAL,
1057                                 RTE_FLOW_ERROR_TYPE_ACTION,
1058                                 act, "Not supported action.");
1059                 return -rte_errno;
1060         }
1061
1062         /* parse attr */
1063         /* must be input direction */
1064         if (!attr->ingress) {
1065                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1066                 rte_flow_error_set(error, EINVAL,
1067                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1068                         attr, "Only support ingress.");
1069                 return -rte_errno;
1070         }
1071
1072         /* not supported */
1073         if (attr->egress) {
1074                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1075                 rte_flow_error_set(error, EINVAL,
1076                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1077                         attr, "Not support egress.");
1078                 return -rte_errno;
1079         }
1080
1081         /* Support 2 priorities, the lowest or highest. */
1082         if (!attr->priority) {
1083                 filter->hig_pri = 0;
1084         } else if (attr->priority == (uint32_t)~0U) {
1085                 filter->hig_pri = 1;
1086         } else {
1087                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1088                 rte_flow_error_set(error, EINVAL,
1089                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1090                         attr, "Not support priority.");
1091                 return -rte_errno;
1092         }
1093
1094         return 0;
1095 }
1096
1097 static int
1098 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1099                                  const struct rte_flow_attr *attr,
1100                              const struct rte_flow_item pattern[],
1101                              const struct rte_flow_action actions[],
1102                              struct rte_eth_syn_filter *filter,
1103                              struct rte_flow_error *error)
1104 {
1105         int ret;
1106         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1107
1108         MAC_TYPE_FILTER_SUP(hw->mac.type);
1109
1110         ret = cons_parse_syn_filter(attr, pattern,
1111                                         actions, filter, error);
1112
1113         if (filter->queue >= dev->data->nb_rx_queues)
1114                 return -rte_errno;
1115
1116         if (ret)
1117                 return ret;
1118
1119         return 0;
1120 }
1121
1122 /**
1123  * Parse the rule to see if it is a L2 tunnel rule.
1124  * And get the L2 tunnel filter info as well.
1125  * Only support E-tag now.
1126  * pattern:
1127  * The first not void item can be E_TAG.
1128  * The next not void item must be END.
1129  * action:
1130  * The first not void action should be VF or PF.
1131  * The next not void action should be END.
1132  * pattern example:
1133  * ITEM         Spec                    Mask
1134  * E_TAG        grp             0x1     0x3
1135  *              e_cid_base      0x309   0xFFF
1136  * END
1137  * other members in mask and spec should be set to 0x00.
1138  * item->last should be NULL.
1139  */
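/**
 * Illustrative application-side sketch of a rule this parser accepts
 * (grp/e_cid_base values follow the example above and are packed into the
 * 14 low bits of rsvd_grp_ecid_b; the VF id is an arbitrary placeholder):
 *
 *   struct rte_flow_item_e_tag e_tag_spec = {
 *       .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *   struct rte_flow_item_e_tag e_tag_mask = {
 *       .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *         .spec = &e_tag_spec, .mask = &e_tag_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
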
1140 static int
1141 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1142                         const struct rte_flow_attr *attr,
1143                         const struct rte_flow_item pattern[],
1144                         const struct rte_flow_action actions[],
1145                         struct rte_eth_l2_tunnel_conf *filter,
1146                         struct rte_flow_error *error)
1147 {
1148         const struct rte_flow_item *item;
1149         const struct rte_flow_item_e_tag *e_tag_spec;
1150         const struct rte_flow_item_e_tag *e_tag_mask;
1151         const struct rte_flow_action *act;
1152         const struct rte_flow_action_vf *act_vf;
1153         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1154
1155         if (!pattern) {
1156                 rte_flow_error_set(error, EINVAL,
1157                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1158                         NULL, "NULL pattern.");
1159                 return -rte_errno;
1160         }
1161
1162         if (!actions) {
1163                 rte_flow_error_set(error, EINVAL,
1164                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1165                                    NULL, "NULL action.");
1166                 return -rte_errno;
1167         }
1168
1169         if (!attr) {
1170                 rte_flow_error_set(error, EINVAL,
1171                                    RTE_FLOW_ERROR_TYPE_ATTR,
1172                                    NULL, "NULL attribute.");
1173                 return -rte_errno;
1174         }
1175
1176         /* The first not void item should be e-tag. */
1177         item = next_no_void_pattern(pattern, NULL);
1178         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1179                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1180                 rte_flow_error_set(error, EINVAL,
1181                         RTE_FLOW_ERROR_TYPE_ITEM,
1182                         item, "Not supported by L2 tunnel filter");
1183                 return -rte_errno;
1184         }
1185
1186         if (!item->spec || !item->mask) {
1187                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1188                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1189                         item, "Not supported by L2 tunnel filter");
1190                 return -rte_errno;
1191         }
1192
1193         /*Not supported last point for range*/
1194         if (item->last) {
1195                 rte_flow_error_set(error, EINVAL,
1196                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1197                         item, "Not supported last point for range");
1198                 return -rte_errno;
1199         }
1200
1201         e_tag_spec = item->spec;
1202         e_tag_mask = item->mask;
1203
1204         /* Only care about GRP and E cid base. */
1205         if (e_tag_mask->epcp_edei_in_ecid_b ||
1206             e_tag_mask->in_ecid_e ||
1207             e_tag_mask->ecid_e ||
1208             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1209                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ITEM,
1212                         item, "Not supported by L2 tunnel filter");
1213                 return -rte_errno;
1214         }
1215
1216         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1217         /**
1218          * grp and e_cid_base are bit fields and only use 14 bits.
1219          * e-tag id is taken as little endian by HW.
1220          */
1221         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1222
1223         /* check if the next not void item is END */
1224         item = next_no_void_pattern(pattern, item);
1225         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1226                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1227                 rte_flow_error_set(error, EINVAL,
1228                         RTE_FLOW_ERROR_TYPE_ITEM,
1229                         item, "Not supported by L2 tunnel filter");
1230                 return -rte_errno;
1231         }
1232
1233         /* parse attr */
1234         /* must be input direction */
1235         if (!attr->ingress) {
1236                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1237                 rte_flow_error_set(error, EINVAL,
1238                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1239                         attr, "Only support ingress.");
1240                 return -rte_errno;
1241         }
1242
1243         /* not supported */
1244         if (attr->egress) {
1245                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1246                 rte_flow_error_set(error, EINVAL,
1247                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1248                         attr, "Not support egress.");
1249                 return -rte_errno;
1250         }
1251
1252         /* not supported */
1253         if (attr->priority) {
1254                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1255                 rte_flow_error_set(error, EINVAL,
1256                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1257                         attr, "Not support priority.");
1258                 return -rte_errno;
1259         }
1260
1261         /* check if the first not void action is VF or PF. */
1262         act = next_no_void_action(actions, NULL);
1263         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1264                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1265                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1266                 rte_flow_error_set(error, EINVAL,
1267                         RTE_FLOW_ERROR_TYPE_ACTION,
1268                         act, "Not supported action.");
1269                 return -rte_errno;
1270         }
1271
1272         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1273                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1274                 filter->pool = act_vf->id;
1275         } else {
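                /* PF case: use the pool index just past the last VF. */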
1276                 filter->pool = pci_dev->max_vfs;
1277         }
1278
1279         /* check if the next not void item is END */
1280         act = next_no_void_action(actions, act);
1281         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1282                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1283                 rte_flow_error_set(error, EINVAL,
1284                         RTE_FLOW_ERROR_TYPE_ACTION,
1285                         act, "Not supported action.");
1286                 return -rte_errno;
1287         }
1288
1289         return 0;
1290 }
1291
1292 static int
1293 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1294                         const struct rte_flow_attr *attr,
1295                         const struct rte_flow_item pattern[],
1296                         const struct rte_flow_action actions[],
1297                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1298                         struct rte_flow_error *error)
1299 {
1300         int ret = 0;
1301         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1302         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1303         uint16_t vf_num;
1304
1305         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1306                                 actions, l2_tn_filter, error);
1307
1308         if (hw->mac.type != ixgbe_mac_X550 &&
1309                 hw->mac.type != ixgbe_mac_X550EM_x &&
1310                 hw->mac.type != ixgbe_mac_X550EM_a) {
1311                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1312                 rte_flow_error_set(error, EINVAL,
1313                         RTE_FLOW_ERROR_TYPE_ITEM,
1314                         NULL, "Not supported by L2 tunnel filter");
1315                 return -rte_errno;
1316         }
1317
1318         vf_num = pci_dev->max_vfs;
1319
1320         if (l2_tn_filter->pool > vf_num)
1321                 return -rte_errno;
1322
1323         return ret;
1324 }
1325
1326 /* Parse to get the attr and action info of flow director rule. */
1327 static int
1328 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1329                           const struct rte_flow_action actions[],
1330                           struct ixgbe_fdir_rule *rule,
1331                           struct rte_flow_error *error)
1332 {
1333         const struct rte_flow_action *act;
1334         const struct rte_flow_action_queue *act_q;
1335         const struct rte_flow_action_mark *mark;
1336
1337         /* parse attr */
1338         /* must be input direction */
1339         if (!attr->ingress) {
1340                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1341                 rte_flow_error_set(error, EINVAL,
1342                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1343                         attr, "Only support ingress.");
1344                 return -rte_errno;
1345         }
1346
1347         /* not supported */
1348         if (attr->egress) {
1349                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1350                 rte_flow_error_set(error, EINVAL,
1351                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1352                         attr, "Not support egress.");
1353                 return -rte_errno;
1354         }
1355
1356         /* not supported */
1357         if (attr->priority) {
1358                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1359                 rte_flow_error_set(error, EINVAL,
1360                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1361                         attr, "Not support priority.");
1362                 return -rte_errno;
1363         }
1364
1365         /* check if the first not void action is QUEUE or DROP. */
1366         act = next_no_void_action(actions, NULL);
1367         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1368             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1369                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1370                 rte_flow_error_set(error, EINVAL,
1371                         RTE_FLOW_ERROR_TYPE_ACTION,
1372                         act, "Not supported action.");
1373                 return -rte_errno;
1374         }
1375
1376         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1377                 act_q = (const struct rte_flow_action_queue *)act->conf;
1378                 rule->queue = act_q->index;
1379         } else { /* drop */
1380                 /* signature mode does not support drop action. */
1381                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1382                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1383                         rte_flow_error_set(error, EINVAL,
1384                                 RTE_FLOW_ERROR_TYPE_ACTION,
1385                                 act, "Not supported action.");
1386                         return -rte_errno;
1387                 }
1388                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1389         }
1390
1391         /* check if the next not void action is MARK */
1392         act = next_no_void_action(actions, act);
1393         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1394                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1395                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1396                 rte_flow_error_set(error, EINVAL,
1397                         RTE_FLOW_ERROR_TYPE_ACTION,
1398                         act, "Not supported action.");
1399                 return -rte_errno;
1400         }
1401
1402         rule->soft_id = 0;
1403
1404         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1405                 mark = (const struct rte_flow_action_mark *)act->conf;
1406                 rule->soft_id = mark->id;
1407                 act = next_no_void_action(actions, act);
1408         }
1409
1410         /* check if the next not void action is END */
1411         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1412                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1413                 rte_flow_error_set(error, EINVAL,
1414                         RTE_FLOW_ERROR_TYPE_ACTION,
1415                         act, "Not supported action.");
1416                 return -rte_errno;
1417         }
1418
1419         return 0;
1420 }
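/*
 * Illustrative sketch only (not part of the driver): an action list that
 * ixgbe_parse_fdir_act_attr() accepts -- QUEUE plus an optional MARK, then
 * END.  The queue index and mark id are hypothetical example values.
 *
 *        struct rte_flow_action_queue queue = { .index = 3 };
 *        struct rte_flow_action_mark mark = { .id = 0x1234 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *
 * Replacing QUEUE with DROP is also accepted, except in signature mode,
 * which the parser above rejects for the DROP action.
 */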
1421
1422 /* Search the next not void item in the pattern, skipping FUZZY items. */
1423 static inline
1424 const struct rte_flow_item *next_no_fuzzy_pattern(
1425                 const struct rte_flow_item pattern[],
1426                 const struct rte_flow_item *cur)
1427 {
1428         const struct rte_flow_item *next =
1429                 next_no_void_pattern(pattern, cur);
1430         while (1) {
1431                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1432                         return next;
1433                 next = next_no_void_pattern(pattern, next);
1434         }
1435 }
1436
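/*
 * Return 1 when the pattern carries a FUZZY item whose masked threshold
 * range is valid and non-zero, i.e. the rule requests signature mode;
 * return 0 otherwise.
 */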
1437 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1438 {
1439         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1440         const struct rte_flow_item *item;
1441         uint32_t sh, lh, mh;
1442         int i = 0;
1443
1444         while (1) {
1445                 item = pattern + i;
1446                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1447                         break;
1448
1449                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1450                         spec = item->spec;
1451                         last = item->last;
1452                         mask = item->mask;
1453
1454                         if (!spec || !mask)
1455                                 return 0;
1456
1457                         sh = spec->thresh;
1458
1459                         if (!last)
1460                                 lh = sh;
1461                         else
1462                                 lh = last->thresh;
1463
1464                         mh = mask->thresh;
1465                         sh = sh & mh;
1466                         lh = lh & mh;
1467
1468                         if (!sh || sh > lh)
1469                                 return 0;
1470
1471                         return 1;
1472                 }
1473
1474                 i++;
1475         }
1476
1477         return 0;
1478 }
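/*
 * Illustrative sketch only (not part of the driver): a FUZZY item that makes
 * signature_match() return 1 and thus selects signature mode.  The threshold
 * values are hypothetical examples.
 *
 *        struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *        struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *        struct rte_flow_item fuzzy_item = {
 *                .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *                .spec = &fuzzy_spec,
 *                .mask = &fuzzy_mask,
 *        };
 */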
1479
1480 /**
1481  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1482  * and collect the flow director filter info along the way.
1483  * UDP/TCP/SCTP PATTERN:
1484  * The first not void item can be ETH or IPV4 or IPV6
1485  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1486  * The next not void item could be UDP or TCP or SCTP (optional)
1487  * The next not void item could be RAW (for flexbyte, optional)
1488  * The next not void item must be END.
1489  * A Fuzzy Match pattern can appear at any place before END.
1490  * Fuzzy Match is optional for IPV4 but is required for IPV6
1491  * MAC VLAN PATTERN:
1492  * The first not void item must be ETH.
1493  * The second not void item must be MAC VLAN.
1494  * The next not void item must be END.
1495  * ACTION:
1496  * The first not void action should be QUEUE or DROP.
1497  * The second not void optional action should be MARK,
1498  * mark_id is a uint32_t number.
1499  * The next not void action should be END.
1500  * UDP/TCP/SCTP pattern example:
1501  * ITEM         Spec                    Mask
1502  * ETH          NULL                    NULL
1503  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1504  *              dst_addr 192.167.3.50   0xFFFFFFFF
1505  * UDP/TCP/SCTP src_port        80      0xFFFF
1506  *              dst_port        80      0xFFFF
1507  * FLEX relative        0       0x1
1508  *              search          0       0x1
1509  *              reserved        0       0
1510  *              offset          12      0xFFFFFFFF
1511  *              limit           0       0xFFFF
1512  *              length          2       0xFFFF
1513  *              pattern[0]      0x86    0xFF
1514  *              pattern[1]      0xDD    0xFF
1515  * END
1516  * MAC VLAN pattern example:
1517  * ITEM         Spec                    Mask
1518  * ETH          dst_addr
1519                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1520                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1521  * MAC VLAN     tci     0x2016          0xEFFF
1522  * END
1523  * Other members in mask and spec should be set to 0x00.
1524  * Item->last should be NULL.
1525  */
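/*
 * Illustrative sketch only (not part of the driver): one way an application
 * could build the IPv4/UDP part of the pattern table above with the generic
 * rte_flow structures.  The RTE_BE16()/RTE_BE32() macros, the IPv4() helper
 * and the address/port values are assumptions taken from the example.
 *
 *        struct rte_flow_item_ipv4 ipv4_spec = { .hdr = {
 *                .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *                .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)) } };
 *        struct rte_flow_item_ipv4 ipv4_mask = { .hdr = {
 *                .src_addr = RTE_BE32(0xffffffff),
 *                .dst_addr = RTE_BE32(0xffffffff) } };
 *        struct rte_flow_item_udp udp_spec = { .hdr = {
 *                .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
 *        struct rte_flow_item_udp udp_mask = { .hdr = {
 *                .src_port = RTE_BE16(0xffff), .dst_port = RTE_BE16(0xffff) } };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                  .spec = &udp_spec, .mask = &udp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 */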
1526 static int
1527 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1528                                const struct rte_flow_attr *attr,
1529                                const struct rte_flow_item pattern[],
1530                                const struct rte_flow_action actions[],
1531                                struct ixgbe_fdir_rule *rule,
1532                                struct rte_flow_error *error)
1533 {
1534         const struct rte_flow_item *item;
1535         const struct rte_flow_item_eth *eth_spec;
1536         const struct rte_flow_item_eth *eth_mask;
1537         const struct rte_flow_item_ipv4 *ipv4_spec;
1538         const struct rte_flow_item_ipv4 *ipv4_mask;
1539         const struct rte_flow_item_ipv6 *ipv6_spec;
1540         const struct rte_flow_item_ipv6 *ipv6_mask;
1541         const struct rte_flow_item_tcp *tcp_spec;
1542         const struct rte_flow_item_tcp *tcp_mask;
1543         const struct rte_flow_item_udp *udp_spec;
1544         const struct rte_flow_item_udp *udp_mask;
1545         const struct rte_flow_item_sctp *sctp_spec;
1546         const struct rte_flow_item_sctp *sctp_mask;
1547         const struct rte_flow_item_vlan *vlan_spec;
1548         const struct rte_flow_item_vlan *vlan_mask;
1549         const struct rte_flow_item_raw *raw_mask;
1550         const struct rte_flow_item_raw *raw_spec;
1551         uint8_t j;
1552
1553         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1554
1555         if (!pattern) {
1556                 rte_flow_error_set(error, EINVAL,
1557                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1558                         NULL, "NULL pattern.");
1559                 return -rte_errno;
1560         }
1561
1562         if (!actions) {
1563                 rte_flow_error_set(error, EINVAL,
1564                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1565                                    NULL, "NULL action.");
1566                 return -rte_errno;
1567         }
1568
1569         if (!attr) {
1570                 rte_flow_error_set(error, EINVAL,
1571                                    RTE_FLOW_ERROR_TYPE_ATTR,
1572                                    NULL, "NULL attribute.");
1573                 return -rte_errno;
1574         }
1575
1576         /**
1577          * Some fields may not be provided. Set spec to 0 and mask to the default
1578          * value, so we need not do anything later for fields that are not provided.
1579          */
1580         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1581         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1582         rule->mask.vlan_tci_mask = 0;
1583         rule->mask.flex_bytes_mask = 0;
1584
1585         /**
1586          * The first not void item should be
1587          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1588          */
1589         item = next_no_fuzzy_pattern(pattern, NULL);
1590         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1591             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1592             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1593             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1594             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1595             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1596                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1597                 rte_flow_error_set(error, EINVAL,
1598                         RTE_FLOW_ERROR_TYPE_ITEM,
1599                         item, "Not supported by fdir filter");
1600                 return -rte_errno;
1601         }
1602
1603         if (signature_match(pattern))
1604                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1605         else
1606                 rule->mode = RTE_FDIR_MODE_PERFECT;
1607
1608         /* Range (item->last) is not supported. */
1609         if (item->last) {
1610                 rte_flow_error_set(error, EINVAL,
1611                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1612                         item, "Not supported last point for range");
1613                 return -rte_errno;
1614         }
1615
1616         /* Get the MAC info. */
1617         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1618                 /**
1619                  * Only support vlan and dst MAC address,
1620                  * others should be masked.
1621                  */
1622                 if (item->spec && !item->mask) {
1623                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1624                         rte_flow_error_set(error, EINVAL,
1625                                 RTE_FLOW_ERROR_TYPE_ITEM,
1626                                 item, "Not supported by fdir filter");
1627                         return -rte_errno;
1628                 }
1629
1630                 if (item->spec) {
1631                         rule->b_spec = TRUE;
1632                         eth_spec = item->spec;
1633
1634                         /* Get the dst MAC. */
1635                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1636                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1637                                         eth_spec->dst.addr_bytes[j];
1638                         }
1639                 }
1640
1641
1642                 if (item->mask) {
1643
1644                         rule->b_mask = TRUE;
1645                         eth_mask = item->mask;
1646
1647                         /* Ether type should be masked. */
1648                         if (eth_mask->type ||
1649                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1650                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1651                                 rte_flow_error_set(error, EINVAL,
1652                                         RTE_FLOW_ERROR_TYPE_ITEM,
1653                                         item, "Not supported by fdir filter");
1654                                 return -rte_errno;
1655                         }
1656
1657                         /* If the Ethernet item has a mask, it means MAC VLAN mode. */
1658                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1659
1660                         /**
1661                          * src MAC address must be masked,
1662                          * and don't support dst MAC address mask.
1663                          */
1664                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1665                                 if (eth_mask->src.addr_bytes[j] ||
1666                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1667                                         memset(rule, 0,
1668                                         sizeof(struct ixgbe_fdir_rule));
1669                                         rte_flow_error_set(error, EINVAL,
1670                                         RTE_FLOW_ERROR_TYPE_ITEM,
1671                                         item, "Not supported by fdir filter");
1672                                         return -rte_errno;
1673                                 }
1674                         }
1675
1676                         /* When there is no VLAN item, the TCI is considered fully masked. */
1677                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1678                 }
1679                 /**
1680                  * If both spec and mask are NULL, it means
1681                  * don't care about ETH. Do nothing.
1682                  */
1683
1684                 /**
1685                  * Check if the next not void item is vlan or ipv4.
1686                  * IPv6 is not supported.
1687                  */
1688                 item = next_no_fuzzy_pattern(pattern, item);
1689                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1690                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1691                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1692                                 rte_flow_error_set(error, EINVAL,
1693                                         RTE_FLOW_ERROR_TYPE_ITEM,
1694                                         item, "Not supported by fdir filter");
1695                                 return -rte_errno;
1696                         }
1697                 } else {
1698                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1699                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1700                                 rte_flow_error_set(error, EINVAL,
1701                                         RTE_FLOW_ERROR_TYPE_ITEM,
1702                                         item, "Not supported by fdir filter");
1703                                 return -rte_errno;
1704                         }
1705                 }
1706         }
1707
1708         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1709                 if (!(item->spec && item->mask)) {
1710                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1711                         rte_flow_error_set(error, EINVAL,
1712                                 RTE_FLOW_ERROR_TYPE_ITEM,
1713                                 item, "Not supported by fdir filter");
1714                         return -rte_errno;
1715                 }
1716
1717                 /* Range (item->last) is not supported. */
1718                 if (item->last) {
1719                         rte_flow_error_set(error, EINVAL,
1720                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1721                                 item, "Not supported last point for range");
1722                         return -rte_errno;
1723                 }
1724
1725                 vlan_spec = item->spec;
1726                 vlan_mask = item->mask;
1727
1728                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1729
1730                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1731                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1732                 /* More than one VLAN tag is not supported. */
1733
1734                 /* Next not void item must be END */
1735                 item = next_no_fuzzy_pattern(pattern, item);
1736                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1737                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1738                         rte_flow_error_set(error, EINVAL,
1739                                 RTE_FLOW_ERROR_TYPE_ITEM,
1740                                 item, "Not supported by fdir filter");
1741                         return -rte_errno;
1742                 }
1743         }
1744
1745         /* Get the IPV4 info. */
1746         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1747                 /**
1748                  * Set the flow type even if there's no content
1749                  * as we must have a flow type.
1750                  */
1751                 rule->ixgbe_fdir.formatted.flow_type =
1752                         IXGBE_ATR_FLOW_TYPE_IPV4;
1753                 /* Range (item->last) is not supported. */
1754                 if (item->last) {
1755                         rte_flow_error_set(error, EINVAL,
1756                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1757                                 item, "Not supported last point for range");
1758                         return -rte_errno;
1759                 }
1760                 /**
1761                  * Only care about src & dst addresses,
1762                  * others should be masked.
1763                  */
1764                 if (!item->mask) {
1765                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1766                         rte_flow_error_set(error, EINVAL,
1767                                 RTE_FLOW_ERROR_TYPE_ITEM,
1768                                 item, "Not supported by fdir filter");
1769                         return -rte_errno;
1770                 }
1771                 rule->b_mask = TRUE;
1772                 ipv4_mask = item->mask;
1773                 if (ipv4_mask->hdr.version_ihl ||
1774                     ipv4_mask->hdr.type_of_service ||
1775                     ipv4_mask->hdr.total_length ||
1776                     ipv4_mask->hdr.packet_id ||
1777                     ipv4_mask->hdr.fragment_offset ||
1778                     ipv4_mask->hdr.time_to_live ||
1779                     ipv4_mask->hdr.next_proto_id ||
1780                     ipv4_mask->hdr.hdr_checksum) {
1781                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_ITEM,
1784                                 item, "Not supported by fdir filter");
1785                         return -rte_errno;
1786                 }
1787                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1788                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1789
1790                 if (item->spec) {
1791                         rule->b_spec = TRUE;
1792                         ipv4_spec = item->spec;
1793                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1794                                 ipv4_spec->hdr.dst_addr;
1795                         rule->ixgbe_fdir.formatted.src_ip[0] =
1796                                 ipv4_spec->hdr.src_addr;
1797                 }
1798
1799                 /**
1800                  * Check if the next not void item is
1801                  * TCP or UDP or SCTP or END.
1802                  */
1803                 item = next_no_fuzzy_pattern(pattern, item);
1804                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1805                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1806                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1807                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1808                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1809                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1810                         rte_flow_error_set(error, EINVAL,
1811                                 RTE_FLOW_ERROR_TYPE_ITEM,
1812                                 item, "Not supported by fdir filter");
1813                         return -rte_errno;
1814                 }
1815         }
1816
1817         /* Get the IPV6 info. */
1818         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1819                 /**
1820                  * Set the flow type even if there's no content
1821                  * as we must have a flow type.
1822                  */
1823                 rule->ixgbe_fdir.formatted.flow_type =
1824                         IXGBE_ATR_FLOW_TYPE_IPV6;
1825
1826                 /**
1827                  * 1. must be signature match mode
1828                  * 2. item->last is not supported
1829                  * 3. mask must not be NULL
1830                  */
1831                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1832                     item->last ||
1833                     !item->mask) {
1834                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1835                         rte_flow_error_set(error, EINVAL,
1836                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1837                                 item, "Not supported last point for range");
1838                         return -rte_errno;
1839                 }
1840
1841                 rule->b_mask = TRUE;
1842                 ipv6_mask = item->mask;
1843                 if (ipv6_mask->hdr.vtc_flow ||
1844                     ipv6_mask->hdr.payload_len ||
1845                     ipv6_mask->hdr.proto ||
1846                     ipv6_mask->hdr.hop_limits) {
1847                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1848                         rte_flow_error_set(error, EINVAL,
1849                                 RTE_FLOW_ERROR_TYPE_ITEM,
1850                                 item, "Not supported by fdir filter");
1851                         return -rte_errno;
1852                 }
1853
1854                 /* check src addr mask */
1855                 for (j = 0; j < 16; j++) {
1856                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1857                                 rule->mask.src_ipv6_mask |= 1 << j;
1858                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1859                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1860                                 rte_flow_error_set(error, EINVAL,
1861                                         RTE_FLOW_ERROR_TYPE_ITEM,
1862                                         item, "Not supported by fdir filter");
1863                                 return -rte_errno;
1864                         }
1865                 }
1866
1867                 /* check dst addr mask */
1868                 for (j = 0; j < 16; j++) {
1869                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1870                                 rule->mask.dst_ipv6_mask |= 1 << j;
1871                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1872                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1873                                 rte_flow_error_set(error, EINVAL,
1874                                         RTE_FLOW_ERROR_TYPE_ITEM,
1875                                         item, "Not supported by fdir filter");
1876                                 return -rte_errno;
1877                         }
1878                 }
1879
1880                 if (item->spec) {
1881                         rule->b_spec = TRUE;
1882                         ipv6_spec = item->spec;
1883                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1884                                    ipv6_spec->hdr.src_addr, 16);
1885                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1886                                    ipv6_spec->hdr.dst_addr, 16);
1887                 }
1888
1889                 /**
1890                  * Check if the next not void item is
1891                  * TCP or UDP or SCTP or END.
1892                  */
1893                 item = next_no_fuzzy_pattern(pattern, item);
1894                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1895                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1896                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1897                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1898                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1899                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1900                         rte_flow_error_set(error, EINVAL,
1901                                 RTE_FLOW_ERROR_TYPE_ITEM,
1902                                 item, "Not supported by fdir filter");
1903                         return -rte_errno;
1904                 }
1905         }
1906
1907         /* Get the TCP info. */
1908         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1909                 /**
1910                  * Set the flow type even if there's no content
1911                  * as we must have a flow type.
1912                  */
1913                 rule->ixgbe_fdir.formatted.flow_type |=
1914                         IXGBE_ATR_L4TYPE_TCP;
1915                 /* Range (item->last) is not supported. */
1916                 if (item->last) {
1917                         rte_flow_error_set(error, EINVAL,
1918                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1919                                 item, "Not supported last point for range");
1920                         return -rte_errno;
1921                 }
1922                 /**
1923                  * Only care about src & dst ports,
1924                  * others should be masked.
1925                  */
1926                 if (!item->mask) {
1927                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1928                         rte_flow_error_set(error, EINVAL,
1929                                 RTE_FLOW_ERROR_TYPE_ITEM,
1930                                 item, "Not supported by fdir filter");
1931                         return -rte_errno;
1932                 }
1933                 rule->b_mask = TRUE;
1934                 tcp_mask = item->mask;
1935                 if (tcp_mask->hdr.sent_seq ||
1936                     tcp_mask->hdr.recv_ack ||
1937                     tcp_mask->hdr.data_off ||
1938                     tcp_mask->hdr.tcp_flags ||
1939                     tcp_mask->hdr.rx_win ||
1940                     tcp_mask->hdr.cksum ||
1941                     tcp_mask->hdr.tcp_urp) {
1942                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1943                         rte_flow_error_set(error, EINVAL,
1944                                 RTE_FLOW_ERROR_TYPE_ITEM,
1945                                 item, "Not supported by fdir filter");
1946                         return -rte_errno;
1947                 }
1948                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1949                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1950
1951                 if (item->spec) {
1952                         rule->b_spec = TRUE;
1953                         tcp_spec = item->spec;
1954                         rule->ixgbe_fdir.formatted.src_port =
1955                                 tcp_spec->hdr.src_port;
1956                         rule->ixgbe_fdir.formatted.dst_port =
1957                                 tcp_spec->hdr.dst_port;
1958                 }
1959
1960                 item = next_no_fuzzy_pattern(pattern, item);
1961                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1962                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1963                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1964                         rte_flow_error_set(error, EINVAL,
1965                                 RTE_FLOW_ERROR_TYPE_ITEM,
1966                                 item, "Not supported by fdir filter");
1967                         return -rte_errno;
1968                 }
1969
1970         }
1971
1972         /* Get the UDP info */
1973         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1974                 /**
1975                  * Set the flow type even if there's no content
1976                  * as we must have a flow type.
1977                  */
1978                 rule->ixgbe_fdir.formatted.flow_type |=
1979                         IXGBE_ATR_L4TYPE_UDP;
1980                 /* Range (item->last) is not supported. */
1981                 if (item->last) {
1982                         rte_flow_error_set(error, EINVAL,
1983                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1984                                 item, "Not supported last point for range");
1985                         return -rte_errno;
1986                 }
1987                 /**
1988                  * Only care about src & dst ports,
1989                  * others should be masked.
1990                  */
1991                 if (!item->mask) {
1992                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1993                         rte_flow_error_set(error, EINVAL,
1994                                 RTE_FLOW_ERROR_TYPE_ITEM,
1995                                 item, "Not supported by fdir filter");
1996                         return -rte_errno;
1997                 }
1998                 rule->b_mask = TRUE;
1999                 udp_mask = item->mask;
2000                 if (udp_mask->hdr.dgram_len ||
2001                     udp_mask->hdr.dgram_cksum) {
2002                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2003                         rte_flow_error_set(error, EINVAL,
2004                                 RTE_FLOW_ERROR_TYPE_ITEM,
2005                                 item, "Not supported by fdir filter");
2006                         return -rte_errno;
2007                 }
2008                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2009                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2010
2011                 if (item->spec) {
2012                         rule->b_spec = TRUE;
2013                         udp_spec = item->spec;
2014                         rule->ixgbe_fdir.formatted.src_port =
2015                                 udp_spec->hdr.src_port;
2016                         rule->ixgbe_fdir.formatted.dst_port =
2017                                 udp_spec->hdr.dst_port;
2018                 }
2019
2020                 item = next_no_fuzzy_pattern(pattern, item);
2021                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2022                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2023                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2024                         rte_flow_error_set(error, EINVAL,
2025                                 RTE_FLOW_ERROR_TYPE_ITEM,
2026                                 item, "Not supported by fdir filter");
2027                         return -rte_errno;
2028                 }
2029
2030         }
2031
2032         /* Get the SCTP info */
2033         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2034                 /**
2035                  * Set the flow type even if there's no content
2036                  * as we must have a flow type.
2037                  */
2038                 rule->ixgbe_fdir.formatted.flow_type |=
2039                         IXGBE_ATR_L4TYPE_SCTP;
2040                 /* Range (item->last) is not supported. */
2041                 if (item->last) {
2042                         rte_flow_error_set(error, EINVAL,
2043                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2044                                 item, "Not supported last point for range");
2045                         return -rte_errno;
2046                 }
2047
2048                 /* Only the x550 family supports the SCTP port. */
2049                 if (hw->mac.type == ixgbe_mac_X550 ||
2050                     hw->mac.type == ixgbe_mac_X550EM_x ||
2051                     hw->mac.type == ixgbe_mac_X550EM_a) {
2052                         /**
2053                          * Only care about src & dst ports,
2054                          * others should be masked.
2055                          */
2056                         if (!item->mask) {
2057                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2058                                 rte_flow_error_set(error, EINVAL,
2059                                         RTE_FLOW_ERROR_TYPE_ITEM,
2060                                         item, "Not supported by fdir filter");
2061                                 return -rte_errno;
2062                         }
2063                         rule->b_mask = TRUE;
2064                         sctp_mask = item->mask;
2065                         if (sctp_mask->hdr.tag ||
2066                                 sctp_mask->hdr.cksum) {
2067                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2068                                 rte_flow_error_set(error, EINVAL,
2069                                         RTE_FLOW_ERROR_TYPE_ITEM,
2070                                         item, "Not supported by fdir filter");
2071                                 return -rte_errno;
2072                         }
2073                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2074                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2075
2076                         if (item->spec) {
2077                                 rule->b_spec = TRUE;
2078                                 sctp_spec = item->spec;
2079                                 rule->ixgbe_fdir.formatted.src_port =
2080                                         sctp_spec->hdr.src_port;
2081                                 rule->ixgbe_fdir.formatted.dst_port =
2082                                         sctp_spec->hdr.dst_port;
2083                         }
2084                 /* For other MAC types, even the SCTP port is not supported. */
2085                 } else {
2086                         sctp_mask = item->mask;
2087                         if (sctp_mask &&
2088                                 (sctp_mask->hdr.src_port ||
2089                                  sctp_mask->hdr.dst_port ||
2090                                  sctp_mask->hdr.tag ||
2091                                  sctp_mask->hdr.cksum)) {
2092                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2093                                 rte_flow_error_set(error, EINVAL,
2094                                         RTE_FLOW_ERROR_TYPE_ITEM,
2095                                         item, "Not supported by fdir filter");
2096                                 return -rte_errno;
2097                         }
2098                 }
2099
2100                 item = next_no_fuzzy_pattern(pattern, item);
2101                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2102                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2103                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2104                         rte_flow_error_set(error, EINVAL,
2105                                 RTE_FLOW_ERROR_TYPE_ITEM,
2106                                 item, "Not supported by fdir filter");
2107                         return -rte_errno;
2108                 }
2109         }
2110
2111         /* Get the flex byte info */
2112         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2113                 /* Range (item->last) is not supported. */
2114                 if (item->last) {
2115                         rte_flow_error_set(error, EINVAL,
2116                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2117                                 item, "Not supported last point for range");
2118                         return -rte_errno;
2119                 }
2120                 /* Mask and spec should not be NULL. */
2121                 if (!item->mask || !item->spec) {
2122                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2123                         rte_flow_error_set(error, EINVAL,
2124                                 RTE_FLOW_ERROR_TYPE_ITEM,
2125                                 item, "Not supported by fdir filter");
2126                         return -rte_errno;
2127                 }
2128
2129                 raw_mask = item->mask;
2130
2131                 /* check mask */
2132                 if (raw_mask->relative != 0x1 ||
2133                     raw_mask->search != 0x1 ||
2134                     raw_mask->reserved != 0x0 ||
2135                     (uint32_t)raw_mask->offset != 0xffffffff ||
2136                     raw_mask->limit != 0xffff ||
2137                     raw_mask->length != 0xffff) {
2138                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2139                         rte_flow_error_set(error, EINVAL,
2140                                 RTE_FLOW_ERROR_TYPE_ITEM,
2141                                 item, "Not supported by fdir filter");
2142                         return -rte_errno;
2143                 }
2144
2145                 raw_spec = item->spec;
2146
2147                 /* check spec */
2148                 if (raw_spec->relative != 0 ||
2149                     raw_spec->search != 0 ||
2150                     raw_spec->reserved != 0 ||
2151                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2152                     raw_spec->offset % 2 ||
2153                     raw_spec->limit != 0 ||
2154                     raw_spec->length != 2 ||
2155                     /* pattern can't be 0xffff */
2156                     (raw_spec->pattern[0] == 0xff &&
2157                      raw_spec->pattern[1] == 0xff)) {
2158                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2159                         rte_flow_error_set(error, EINVAL,
2160                                 RTE_FLOW_ERROR_TYPE_ITEM,
2161                                 item, "Not supported by fdir filter");
2162                         return -rte_errno;
2163                 }
2164
2165                 /* check pattern mask */
2166                 if (raw_mask->pattern[0] != 0xff ||
2167                     raw_mask->pattern[1] != 0xff) {
2168                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2169                         rte_flow_error_set(error, EINVAL,
2170                                 RTE_FLOW_ERROR_TYPE_ITEM,
2171                                 item, "Not supported by fdir filter");
2172                         return -rte_errno;
2173                 }
2174
2175                 rule->mask.flex_bytes_mask = 0xffff;
2176                 rule->ixgbe_fdir.formatted.flex_bytes =
2177                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2178                         raw_spec->pattern[0];
2179                 rule->flex_bytes_offset = raw_spec->offset;
2180         }
2181
2182         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2183                 /* check if the next not void item is END */
2184                 item = next_no_fuzzy_pattern(pattern, item);
2185                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2186                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2187                         rte_flow_error_set(error, EINVAL,
2188                                 RTE_FLOW_ERROR_TYPE_ITEM,
2189                                 item, "Not supported by fdir filter");
2190                         return -rte_errno;
2191                 }
2192         }
2193
2194         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2195 }
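/*
 * Illustrative sketch only (not part of the driver): a RAW (flexbyte) item
 * in the shape ixgbe_parse_fdir_filter_normal() accepts.  The spec leaves
 * relative/search/limit at zero, uses an even offset and a 2-byte pattern;
 * the mask sets every checked field.  Offset and pattern bytes are the
 * example values from the table above (ethertype 0x86DD at offset 12).
 *
 *        static const uint8_t flex_pattern[2] = { 0x86, 0xdd };
 *        static const uint8_t flex_pattern_mask[2] = { 0xff, 0xff };
 *        struct rte_flow_item_raw raw_spec = {
 *                .offset = 12, .length = 2, .pattern = flex_pattern,
 *        };
 *        struct rte_flow_item_raw raw_mask = {
 *                .relative = 1, .search = 1, .offset = (int32_t)0xffffffff,
 *                .limit = 0xffff, .length = 0xffff,
 *                .pattern = flex_pattern_mask,
 *        };
 */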
2196
2197 #define NVGRE_PROTOCOL 0x6558
2198
2199 /**
2200  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2201  * and collect the flow director filter info along the way.
2202  * VxLAN PATTERN:
2203  * The first not void item must be ETH.
2204  * The second not void item must be IPV4/ IPV6.
2205  * The third not void item must be UDP and the fourth must be VXLAN.
2206  * The next not void item must be END.
2207  * NVGRE PATTERN:
2208  * The first not void item must be ETH.
2209  * The second not void item must be IPV4/ IPV6.
2210  * The third not void item must be NVGRE.
2211  * The next not void item must be END.
2212  * ACTION:
2213  * The first not void action should be QUEUE or DROP.
2214  * The second not void optional action should be MARK,
2215  * mark_id is a uint32_t number.
2216  * The next not void action should be END.
2217  * VxLAN pattern example:
2218  * ITEM         Spec                    Mask
2219  * ETH          NULL                    NULL
2220  * IPV4/IPV6    NULL                    NULL
2221  * UDP          NULL                    NULL
2222  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2223  * MAC VLAN     tci     0x2016          0xEFFF
2224  * END
2225  * NVGRE pattern example:
2226  * ITEM         Spec                    Mask
2227  * ETH          NULL                    NULL
2228  * IPV4/IPV6    NULL                    NULL
2229  * NVGRE        protocol        0x6558  0xFFFF
2230  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2231  * MAC VLAN     tci     0x2016          0xEFFF
2232  * END
2233  * Other members in mask and spec should be set to 0x00.
2234  * item->last should be NULL.
2235  */
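/*
 * Illustrative sketch only (not part of the driver): the VXLAN item from the
 * pattern table above expressed with the generic rte_flow structures.  The
 * VNI bytes are the example values from the table.
 *
 *        struct rte_flow_item_vxlan vxlan_spec = {
 *                .vni = { 0x00, 0x32, 0x54 },
 *        };
 *        struct rte_flow_item_vxlan vxlan_mask = {
 *                .vni = { 0xff, 0xff, 0xff },
 *        };
 *        struct rte_flow_item vxlan_item = {
 *                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *                .spec = &vxlan_spec, .mask = &vxlan_mask,
 *        };
 */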
2236 static int
2237 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2238                                const struct rte_flow_item pattern[],
2239                                const struct rte_flow_action actions[],
2240                                struct ixgbe_fdir_rule *rule,
2241                                struct rte_flow_error *error)
2242 {
2243         const struct rte_flow_item *item;
2244         const struct rte_flow_item_vxlan *vxlan_spec;
2245         const struct rte_flow_item_vxlan *vxlan_mask;
2246         const struct rte_flow_item_nvgre *nvgre_spec;
2247         const struct rte_flow_item_nvgre *nvgre_mask;
2248         const struct rte_flow_item_eth *eth_spec;
2249         const struct rte_flow_item_eth *eth_mask;
2250         const struct rte_flow_item_vlan *vlan_spec;
2251         const struct rte_flow_item_vlan *vlan_mask;
2252         uint32_t j;
2253
2254         if (!pattern) {
2255                 rte_flow_error_set(error, EINVAL,
2256                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2257                                    NULL, "NULL pattern.");
2258                 return -rte_errno;
2259         }
2260
2261         if (!actions) {
2262                 rte_flow_error_set(error, EINVAL,
2263                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2264                                    NULL, "NULL action.");
2265                 return -rte_errno;
2266         }
2267
2268         if (!attr) {
2269                 rte_flow_error_set(error, EINVAL,
2270                                    RTE_FLOW_ERROR_TYPE_ATTR,
2271                                    NULL, "NULL attribute.");
2272                 return -rte_errno;
2273         }
2274
2275         /**
2276          * Some fields may not be provided. Set spec to 0 and mask to the default
2277          * value, so we need not do anything later for fields that are not provided.
2278          */
2279         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2280         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2281         rule->mask.vlan_tci_mask = 0;
2282
2283         /**
2284          * The first not void item should be
2285          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2286          */
2287         item = next_no_void_pattern(pattern, NULL);
2288         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2289             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2290             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2291             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2292             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2293             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2294                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2295                 rte_flow_error_set(error, EINVAL,
2296                         RTE_FLOW_ERROR_TYPE_ITEM,
2297                         item, "Not supported by fdir filter");
2298                 return -rte_errno;
2299         }
2300
2301         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2302
2303         /* Skip MAC. */
2304         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2305                 /* Only used to describe the protocol stack. */
2306                 if (item->spec || item->mask) {
2307                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2308                         rte_flow_error_set(error, EINVAL,
2309                                 RTE_FLOW_ERROR_TYPE_ITEM,
2310                                 item, "Not supported by fdir filter");
2311                         return -rte_errno;
2312                 }
2313                 /* Range (item->last) is not supported. */
2314                 if (item->last) {
2315                         rte_flow_error_set(error, EINVAL,
2316                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2317                                 item, "Not supported last point for range");
2318                         return -rte_errno;
2319                 }
2320
2321                 /* Check if the next not void item is IPv4 or IPv6. */
2322                 item = next_no_void_pattern(pattern, item);
2323                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2324                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2325                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2326                         rte_flow_error_set(error, EINVAL,
2327                                 RTE_FLOW_ERROR_TYPE_ITEM,
2328                                 item, "Not supported by fdir filter");
2329                         return -rte_errno;
2330                 }
2331         }
2332
2333         /* Skip IP. */
2334         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2335             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2336                 /* Only used to describe the protocol stack. */
2337                 if (item->spec || item->mask) {
2338                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2339                         rte_flow_error_set(error, EINVAL,
2340                                 RTE_FLOW_ERROR_TYPE_ITEM,
2341                                 item, "Not supported by fdir filter");
2342                         return -rte_errno;
2343                 }
2344                 /* Range (item->last) is not supported. */
2345                 if (item->last) {
2346                         rte_flow_error_set(error, EINVAL,
2347                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2348                                 item, "Not supported last point for range");
2349                         return -rte_errno;
2350                 }
2351
2352                 /* Check if the next not void item is UDP or NVGRE. */
2353                 item = next_no_void_pattern(pattern, item);
2354                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2355                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2356                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2357                         rte_flow_error_set(error, EINVAL,
2358                                 RTE_FLOW_ERROR_TYPE_ITEM,
2359                                 item, "Not supported by fdir filter");
2360                         return -rte_errno;
2361                 }
2362         }
2363
2364         /* Skip UDP. */
2365         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2366                 /* Only used to describe the protocol stack. */
2367                 if (item->spec || item->mask) {
2368                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2369                         rte_flow_error_set(error, EINVAL,
2370                                 RTE_FLOW_ERROR_TYPE_ITEM,
2371                                 item, "Not supported by fdir filter");
2372                         return -rte_errno;
2373                 }
2374                 /* Range (item->last) is not supported. */
2375                 if (item->last) {
2376                         rte_flow_error_set(error, EINVAL,
2377                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2378                                 item, "Not supported last point for range");
2379                         return -rte_errno;
2380                 }
2381
2382                 /* Check if the next not void item is VxLAN. */
2383                 item = next_no_void_pattern(pattern, item);
2384                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2385                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2386                         rte_flow_error_set(error, EINVAL,
2387                                 RTE_FLOW_ERROR_TYPE_ITEM,
2388                                 item, "Not supported by fdir filter");
2389                         return -rte_errno;
2390                 }
2391         }
2392
2393         /* Get the VxLAN info */
2394         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2395                 rule->ixgbe_fdir.formatted.tunnel_type =
2396                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2397
2398                 /* Only care about VNI, others should be masked. */
2399                 if (!item->mask) {
2400                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2401                         rte_flow_error_set(error, EINVAL,
2402                                 RTE_FLOW_ERROR_TYPE_ITEM,
2403                                 item, "Not supported by fdir filter");
2404                         return -rte_errno;
2405                 }
2406                 /* Range (item->last) is not supported. */
2407                 if (item->last) {
2408                         rte_flow_error_set(error, EINVAL,
2409                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2410                                 item, "Not supported last point for range");
2411                         return -rte_errno;
2412                 }
2413                 rule->b_mask = TRUE;
2414
2415                 /* Tunnel type is always meaningful. */
2416                 rule->mask.tunnel_type_mask = 1;
2417
2418                 vxlan_mask = item->mask;
2419                 if (vxlan_mask->flags) {
2420                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2421                         rte_flow_error_set(error, EINVAL,
2422                                 RTE_FLOW_ERROR_TYPE_ITEM,
2423                                 item, "Not supported by fdir filter");
2424                         return -rte_errno;
2425                 }
2426                 /* VNI must be either fully masked or not masked at all. */
2427                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2428                         vxlan_mask->vni[2]) &&
2429                         ((vxlan_mask->vni[0] != 0xFF) ||
2430                         (vxlan_mask->vni[1] != 0xFF) ||
2431                                 (vxlan_mask->vni[2] != 0xFF))) {
2432                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2433                         rte_flow_error_set(error, EINVAL,
2434                                 RTE_FLOW_ERROR_TYPE_ITEM,
2435                                 item, "Not supported by fdir filter");
2436                         return -rte_errno;
2437                 }
2438
2439                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2440                         RTE_DIM(vxlan_mask->vni));
2441
2442                 if (item->spec) {
2443                         rule->b_spec = TRUE;
2444                         vxlan_spec = item->spec;
2445                         rte_memcpy(((uint8_t *)
2446                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2447                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2448                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2449                                 rule->ixgbe_fdir.formatted.tni_vni);
2450                 }
2451         }
2452
2453         /* Get the NVGRE info */
2454         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2455                 rule->ixgbe_fdir.formatted.tunnel_type =
2456                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2457
2458                 /**
2459                  * Only care about c_k_s_rsvd0_ver, protocol and TNI,
2460                  * others should be masked.
2461                  */
2462                 if (!item->mask) {
2463                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2464                         rte_flow_error_set(error, EINVAL,
2465                                 RTE_FLOW_ERROR_TYPE_ITEM,
2466                                 item, "Not supported by fdir filter");
2467                         return -rte_errno;
2468                 }
2469                 /* Range (item->last) is not supported. */
2470                 if (item->last) {
2471                         rte_flow_error_set(error, EINVAL,
2472                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2473                                 item, "Not supported last point for range");
2474                         return -rte_errno;
2475                 }
2476                 rule->b_mask = TRUE;
2477
2478                 /* Tunnel type is always meaningful. */
2479                 rule->mask.tunnel_type_mask = 1;
2480
2481                 nvgre_mask = item->mask;
2482                 if (nvgre_mask->flow_id) {
2483                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2484                         rte_flow_error_set(error, EINVAL,
2485                                 RTE_FLOW_ERROR_TYPE_ITEM,
2486                                 item, "Not supported by fdir filter");
2487                         return -rte_errno;
2488                 }
2489                 if (nvgre_mask->protocol &&
2490                     nvgre_mask->protocol != 0xFFFF) {
2491                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2492                         rte_flow_error_set(error, EINVAL,
2493                                 RTE_FLOW_ERROR_TYPE_ITEM,
2494                                 item, "Not supported by fdir filter");
2495                         return -rte_errno;
2496                 }
2497                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2498                     nvgre_mask->c_k_s_rsvd0_ver !=
2499                         rte_cpu_to_be_16(0xFFFF)) {
2500                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2501                         rte_flow_error_set(error, EINVAL,
2502                                 RTE_FLOW_ERROR_TYPE_ITEM,
2503                                 item, "Not supported by fdir filter");
2504                         return -rte_errno;
2505                 }
2506                 /* The TNI must be either fully masked or not masked at all. */
2507                 if (nvgre_mask->tni[0] &&
2508                     ((nvgre_mask->tni[0] != 0xFF) ||
2509                     (nvgre_mask->tni[1] != 0xFF) ||
2510                     (nvgre_mask->tni[2] != 0xFF))) {
2511                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2512                         rte_flow_error_set(error, EINVAL,
2513                                 RTE_FLOW_ERROR_TYPE_ITEM,
2514                                 item, "Not supported by fdir filter");
2515                         return -rte_errno;
2516                 }
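                /*
                 * The NVGRE key is the 24-bit TNI followed by an 8-bit flow
                 * ID; the mask below is shifted left by 8 so it aligns with
                 * the TNI and leaves the flow-ID byte unmasked.
                 */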
2517                 /* The TNI is a 24-bit field. */
2518                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2519                         RTE_DIM(nvgre_mask->tni));
2520                 rule->mask.tunnel_id_mask <<= 8;
2521
2522                 if (item->spec) {
2523                         rule->b_spec = TRUE;
2524                         nvgre_spec = item->spec;
2525                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2526                             rte_cpu_to_be_16(0x2000) &&
2527                                 nvgre_mask->c_k_s_rsvd0_ver) {
2528                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2529                                 rte_flow_error_set(error, EINVAL,
2530                                         RTE_FLOW_ERROR_TYPE_ITEM,
2531                                         item, "Not supported by fdir filter");
2532                                 return -rte_errno;
2533                         }
2534                         if (nvgre_mask->protocol &&
2535                             nvgre_spec->protocol !=
2536                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2537                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2538                                 rte_flow_error_set(error, EINVAL,
2539                                         RTE_FLOW_ERROR_TYPE_ITEM,
2540                                         item, "Not supported by fdir filter");
2541                                 return -rte_errno;
2542                         }
2543                         /* The TNI is a 24-bit field. */
2544                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2545                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2546                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2547                 }
2548         }
2549
2550         /* check if the next not void item is MAC */
2551         item = next_no_void_pattern(pattern, item);
2552         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2553                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2554                 rte_flow_error_set(error, EINVAL,
2555                         RTE_FLOW_ERROR_TYPE_ITEM,
2556                         item, "Not supported by fdir filter");
2557                 return -rte_errno;
2558         }
2559
2560         /**
2561          * Only the VLAN and dst MAC address are supported;
2562          * everything else should be masked.
2563          */
2564
2565         if (!item->mask) {
2566                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2567                 rte_flow_error_set(error, EINVAL,
2568                         RTE_FLOW_ERROR_TYPE_ITEM,
2569                         item, "Not supported by fdir filter");
2570                 return -rte_errno;
2571         }
2572         /* Ranges ("last") are not supported. */
2573         if (item->last) {
2574                 rte_flow_error_set(error, EINVAL,
2575                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2576                         item, "Not supported last point for range");
2577                 return -rte_errno;
2578         }
2579         rule->b_mask = TRUE;
2580         eth_mask = item->mask;
2581
2582         /* Ether type should be masked. */
2583         if (eth_mask->type) {
2584                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2585                 rte_flow_error_set(error, EINVAL,
2586                         RTE_FLOW_ERROR_TYPE_ITEM,
2587                         item, "Not supported by fdir filter");
2588                 return -rte_errno;
2589         }
2590
2591         /* src MAC address should be masked. */
2592         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2593                 if (eth_mask->src.addr_bytes[j]) {
2594                         memset(rule, 0,
2595                                sizeof(struct ixgbe_fdir_rule));
2596                         rte_flow_error_set(error, EINVAL,
2597                                 RTE_FLOW_ERROR_TYPE_ITEM,
2598                                 item, "Not supported by fdir filter");
2599                         return -rte_errno;
2600                 }
2601         }
2602         rule->mask.mac_addr_byte_mask = 0;
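        /*
         * Build a per-byte dst MAC mask: bit j of mac_addr_byte_mask is set
         * when byte j of the destination MAC must match exactly (mask 0xFF);
         * partially masked bytes are rejected.
         */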
2603         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2604                 /* It's a per byte mask. */
2605                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2606                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2607                 } else if (eth_mask->dst.addr_bytes[j]) {
2608                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2609                         rte_flow_error_set(error, EINVAL,
2610                                 RTE_FLOW_ERROR_TYPE_ITEM,
2611                                 item, "Not supported by fdir filter");
2612                         return -rte_errno;
2613                 }
2614         }
2615
2616         /* When there is no VLAN item, treat the TCI as fully masked. */
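        /* 0xEFFF keeps the PCP and VLAN ID bits of the TCI and clears the CFI/DEI bit. */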
2617         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2618
2619         if (item->spec) {
2620                 rule->b_spec = TRUE;
2621                 eth_spec = item->spec;
2622
2623                 /* Get the dst MAC. */
2624                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2625                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2626                                 eth_spec->dst.addr_bytes[j];
2627                 }
2628         }
2629
2630         /**
2631          * Check if the next not void item is VLAN or IPv4.
2632          * IPv6 is not supported.
2633          */
2634         item = next_no_void_pattern(pattern, item);
2635         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2636                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2637                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2638                 rte_flow_error_set(error, EINVAL,
2639                         RTE_FLOW_ERROR_TYPE_ITEM,
2640                         item, "Not supported by fdir filter");
2641                 return -rte_errno;
2642         }
2643         /* Ranges ("last") are not supported. */
2644         if (item->last) {
2645                 rte_flow_error_set(error, EINVAL,
2646                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2647                         item, "Not supported last point for range");
2648                 return -rte_errno;
2649         }
2650
2651         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2652                 if (!(item->spec && item->mask)) {
2653                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2654                         rte_flow_error_set(error, EINVAL,
2655                                 RTE_FLOW_ERROR_TYPE_ITEM,
2656                                 item, "Not supported by fdir filter");
2657                         return -rte_errno;
2658                 }
2659
2660                 vlan_spec = item->spec;
2661                 vlan_mask = item->mask;
2662
2663                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2664
2665                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2666                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2667                 /* More than one VLAN tag is not supported. */
2668
2669                 /* check if the next not void item is END */
2670                 item = next_no_void_pattern(pattern, item);
2671
2672                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2673                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2674                         rte_flow_error_set(error, EINVAL,
2675                                 RTE_FLOW_ERROR_TYPE_ITEM,
2676                                 item, "Not supported by fdir filter");
2677                         return -rte_errno;
2678                 }
2679         }
2680
2681         /**
2682          * If no VLAN tag is given, the VLAN is a don't-care.
2683          * Do nothing.
2684          */
2685
2686         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2687 }
2688
2689 static int
2690 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2691                         const struct rte_flow_attr *attr,
2692                         const struct rte_flow_item pattern[],
2693                         const struct rte_flow_action actions[],
2694                         struct ixgbe_fdir_rule *rule,
2695                         struct rte_flow_error *error)
2696 {
2697         int ret;
2698         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2699         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2700
2701         if (hw->mac.type != ixgbe_mac_82599EB &&
2702                 hw->mac.type != ixgbe_mac_X540 &&
2703                 hw->mac.type != ixgbe_mac_X550 &&
2704                 hw->mac.type != ixgbe_mac_X550EM_x &&
2705                 hw->mac.type != ixgbe_mac_X550EM_a)
2706                 return -ENOTSUP;
2707
2708         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2709                                         actions, rule, error);
2710
2711         if (!ret)
2712                 goto step_next;
2713
2714         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2715                                         actions, rule, error);
2716
2717         if (ret)
2718                 return ret;
2719
2720 step_next:
2721
2722         if (hw->mac.type == ixgbe_mac_82599EB &&
2723                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2724                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2725                 rule->ixgbe_fdir.formatted.dst_port != 0))
2726                 return -ENOTSUP;
2727
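        /*
         * The parsed rule must use the same fdir mode the port was configured
         * with, and fdir must not be disabled.
         */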
2728         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2729             fdir_mode != rule->mode)
2730                 return -ENOTSUP;
2731
2732         if (rule->queue >= dev->data->nb_rx_queues)
2733                 return -ENOTSUP;
2734
2735         return ret;
2736 }
2737
2738 static int
2739 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2740                         const struct rte_flow_attr *attr,
2741                         const struct rte_flow_action actions[],
2742                         struct ixgbe_rte_flow_rss_conf *rss_conf,
2743                         struct rte_flow_error *error)
2744 {
2745         const struct rte_flow_action *act;
2746         const struct rte_flow_action_rss *rss;
2747         uint16_t n;
2748
2749         /**
2750          * RSS only supports forwarding;
2751          * check if the first not void action is RSS.
2752          */
2753         act = next_no_void_action(actions, NULL);
2754         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2755                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2756                 rte_flow_error_set(error, EINVAL,
2757                         RTE_FLOW_ERROR_TYPE_ACTION,
2758                         act, "Not supported action.");
2759                 return -rte_errno;
2760         }
2761
2762         rss = act->conf;
2763
2764         if (!rss || !rss->num) {
2765                 rte_flow_error_set(error, EINVAL,
2766                                 RTE_FLOW_ERROR_TYPE_ACTION,
2767                                 act,
2768                            "no valid queues");
2769                 return -rte_errno;
2770         }
2771
2772         for (n = 0; n < rss->num; n++) {
2773                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2774                         rte_flow_error_set(error, EINVAL,
2775                                    RTE_FLOW_ERROR_TYPE_ACTION,
2776                                    act,
2777                                    "queue id > max number of queues");
2778                         return -rte_errno;
2779                 }
2780         }
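        /*
         * Use the RSS configuration supplied by the application when present;
         * otherwise default to every hash type the ixgbe PMD supports.
         */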
2781         if (rss->rss_conf)
2782                 rss_conf->rss_conf = *rss->rss_conf;
2783         else
2784                 rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
2785
2786         for (n = 0; n < rss->num; ++n)
2787                 rss_conf->queue[n] = rss->queue[n];
2788         rss_conf->num = rss->num;
2789
2790         /* check if the next not void action is END */
2791         act = next_no_void_action(actions, act);
2792         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2793                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2794                 rte_flow_error_set(error, EINVAL,
2795                         RTE_FLOW_ERROR_TYPE_ACTION,
2796                         act, "Not supported action.");
2797                 return -rte_errno;
2798         }
2799
2800         /* parse attr */
2801         /* must be input direction */
2802         if (!attr->ingress) {
2803                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2804                 rte_flow_error_set(error, EINVAL,
2805                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2806                                    attr, "Only support ingress.");
2807                 return -rte_errno;
2808         }
2809
2810         /* not supported */
2811         if (attr->egress) {
2812                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2813                 rte_flow_error_set(error, EINVAL,
2814                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2815                                    attr, "Not support egress.");
2816                 return -rte_errno;
2817         }
2818
2819         if (attr->priority > 0xFFFF) {
2820                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2821                 rte_flow_error_set(error, EINVAL,
2822                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2823                                    attr, "Error priority.");
2824                 return -rte_errno;
2825         }
2826
2827         return 0;
2828 }
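/*
 * Illustrative sketch only (not part of the driver): an application would
 * typically hand the parser above an action list shaped like the following,
 * using the rte_flow_action_rss layout this file relies on (rss_conf, num and
 * a trailing queue[] array). Names such as "conf" and "rss" are made up for
 * the example.
 *
 *     struct rte_eth_rss_conf conf = { .rss_hf = ETH_RSS_IP };
 *     struct rte_flow_action_rss *rss;
 *
 *     rss = calloc(1, sizeof(*rss) + 2 * sizeof(uint16_t));
 *     rss->rss_conf = &conf;
 *     rss->num = 2;
 *     rss->queue[0] = 0;
 *     rss->queue[1] = 1;
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */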
2829
2830 /* remove the rss filter */
2831 static void
2832 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2833 {
2834         struct ixgbe_filter_info *filter_info =
2835                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2836
2837         if (filter_info->rss_info.num)
2838                 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2839 }
2840
2841 void
2842 ixgbe_filterlist_init(void)
2843 {
2844         TAILQ_INIT(&filter_ntuple_list);
2845         TAILQ_INIT(&filter_ethertype_list);
2846         TAILQ_INIT(&filter_syn_list);
2847         TAILQ_INIT(&filter_fdir_list);
2848         TAILQ_INIT(&filter_l2_tunnel_list);
2849         TAILQ_INIT(&filter_rss_list);
2850         TAILQ_INIT(&ixgbe_flow_list);
2851 }
2852
2853 void
2854 ixgbe_filterlist_flush(void)
2855 {
2856         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2857         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2858         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2859         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2860         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2861         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2862         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2863
2864         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2865                 TAILQ_REMOVE(&filter_ntuple_list,
2866                                  ntuple_filter_ptr,
2867                                  entries);
2868                 rte_free(ntuple_filter_ptr);
2869         }
2870
2871         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2872                 TAILQ_REMOVE(&filter_ethertype_list,
2873                                  ethertype_filter_ptr,
2874                                  entries);
2875                 rte_free(ethertype_filter_ptr);
2876         }
2877
2878         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2879                 TAILQ_REMOVE(&filter_syn_list,
2880                                  syn_filter_ptr,
2881                                  entries);
2882                 rte_free(syn_filter_ptr);
2883         }
2884
2885         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2886                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2887                                  l2_tn_filter_ptr,
2888                                  entries);
2889                 rte_free(l2_tn_filter_ptr);
2890         }
2891
2892         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2893                 TAILQ_REMOVE(&filter_fdir_list,
2894                                  fdir_rule_ptr,
2895                                  entries);
2896                 rte_free(fdir_rule_ptr);
2897         }
2898
2899         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2900                 TAILQ_REMOVE(&filter_rss_list,
2901                                  rss_filter_ptr,
2902                                  entries);
2903                 rte_free(rss_filter_ptr);
2904         }
2905
2906         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2907                 TAILQ_REMOVE(&ixgbe_flow_list,
2908                                  ixgbe_flow_mem_ptr,
2909                                  entries);
2910                 rte_free(ixgbe_flow_mem_ptr->flow);
2911                 rte_free(ixgbe_flow_mem_ptr);
2912         }
2913 }
2914
2915 /**
2916  * Create or destroy a flow rule.
2917  * Theoretically one rule can match more than one filter.
2918  * We let it use the first filter it hits,
2919  * so the sequence matters.
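 * The parsers are tried in order: ntuple, ethertype, SYN, flow director,
 * L2 tunnel and finally RSS.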
2920  */
2921 static struct rte_flow *
2922 ixgbe_flow_create(struct rte_eth_dev *dev,
2923                   const struct rte_flow_attr *attr,
2924                   const struct rte_flow_item pattern[],
2925                   const struct rte_flow_action actions[],
2926                   struct rte_flow_error *error)
2927 {
2928         int ret;
2929         struct rte_eth_ntuple_filter ntuple_filter;
2930         struct rte_eth_ethertype_filter ethertype_filter;
2931         struct rte_eth_syn_filter syn_filter;
2932         struct ixgbe_fdir_rule fdir_rule;
2933         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2934         struct ixgbe_hw_fdir_info *fdir_info =
2935                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2936         struct ixgbe_rte_flow_rss_conf rss_conf;
2937         struct rte_flow *flow = NULL;
2938         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2939         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2940         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2941         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2942         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2943         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2944         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2945         uint8_t first_mask = FALSE;
2946
2947         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2948         if (!flow) {
2949                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2950                 return (struct rte_flow *)flow;
2951         }
2952         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2953                         sizeof(struct ixgbe_flow_mem), 0);
2954         if (!ixgbe_flow_mem_ptr) {
2955                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2956                 rte_free(flow);
2957                 return NULL;
2958         }
2959         ixgbe_flow_mem_ptr->flow = flow;
2960         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2961                                 ixgbe_flow_mem_ptr, entries);
2962
2963         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2964         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2965                         actions, &ntuple_filter, error);
2966
2967 #ifdef RTE_LIBRTE_SECURITY
2968         /* An ESP flow is not really a flow. */
2969         if (ntuple_filter.proto == IPPROTO_ESP)
2970                 return flow;
2971 #endif
2972
2973         if (!ret) {
2974                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2975                 if (!ret) {
2976                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2977                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2978                         if (!ntuple_filter_ptr) {
2979                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2980                                 goto out;
2981                         }
2982                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2983                                 &ntuple_filter,
2984                                 sizeof(struct rte_eth_ntuple_filter));
2985                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2986                                 ntuple_filter_ptr, entries);
2987                         flow->rule = ntuple_filter_ptr;
2988                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2989                         return flow;
2990                 }
2991                 goto out;
2992         }
2993
2994         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2995         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2996                                 actions, &ethertype_filter, error);
2997         if (!ret) {
2998                 ret = ixgbe_add_del_ethertype_filter(dev,
2999                                 &ethertype_filter, TRUE);
3000                 if (!ret) {
3001                         ethertype_filter_ptr = rte_zmalloc(
3002                                 "ixgbe_ethertype_filter",
3003                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3004                         if (!ethertype_filter_ptr) {
3005                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3006                                 goto out;
3007                         }
3008                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3009                                 &ethertype_filter,
3010                                 sizeof(struct rte_eth_ethertype_filter));
3011                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
3012                                 ethertype_filter_ptr, entries);
3013                         flow->rule = ethertype_filter_ptr;
3014                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3015                         return flow;
3016                 }
3017                 goto out;
3018         }
3019
3020         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3021         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3022                                 actions, &syn_filter, error);
3023         if (!ret) {
3024                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3025                 if (!ret) {
3026                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3027                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3028                         if (!syn_filter_ptr) {
3029                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3030                                 goto out;
3031                         }
3032                         rte_memcpy(&syn_filter_ptr->filter_info,
3033                                 &syn_filter,
3034                                 sizeof(struct rte_eth_syn_filter));
3035                         TAILQ_INSERT_TAIL(&filter_syn_list,
3036                                 syn_filter_ptr,
3037                                 entries);
3038                         flow->rule = syn_filter_ptr;
3039                         flow->filter_type = RTE_ETH_FILTER_SYN;
3040                         return flow;
3041                 }
3042                 goto out;
3043         }
3044
3045         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3046         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3047                                 actions, &fdir_rule, error);
3048         if (!ret) {
3049                 /* A mask cannot be deleted. */
3050                 if (fdir_rule.b_mask) {
3051                         if (!fdir_info->mask_added) {
3052                                 /* It's the first time the mask is set. */
3053                                 rte_memcpy(&fdir_info->mask,
3054                                         &fdir_rule.mask,
3055                                         sizeof(struct ixgbe_hw_fdir_mask));
3056                                 fdir_info->flex_bytes_offset =
3057                                         fdir_rule.flex_bytes_offset;
3058
3059                                 if (fdir_rule.mask.flex_bytes_mask)
3060                                         ixgbe_fdir_set_flexbytes_offset(dev,
3061                                                 fdir_rule.flex_bytes_offset);
3062
3063                                 ret = ixgbe_fdir_set_input_mask(dev);
3064                                 if (ret)
3065                                         goto out;
3066
3067                                 fdir_info->mask_added = TRUE;
3068                                 first_mask = TRUE;
3069                         } else {
3070                                 /**
3071                                  * Only one global mask is supported;
3072                                  * all the masks should be the same.
3073                                  */
3074                                 ret = memcmp(&fdir_info->mask,
3075                                         &fdir_rule.mask,
3076                                         sizeof(struct ixgbe_hw_fdir_mask));
3077                                 if (ret)
3078                                         goto out;
3079
3080                                 if (fdir_info->flex_bytes_offset !=
3081                                                 fdir_rule.flex_bytes_offset)
3082                                         goto out;
3083                         }
3084                 }
3085
3086                 if (fdir_rule.b_spec) {
3087                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3088                                         FALSE, FALSE);
3089                         if (!ret) {
3090                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3091                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
3092                                 if (!fdir_rule_ptr) {
3093                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3094                                         goto out;
3095                                 }
3096                                 rte_memcpy(&fdir_rule_ptr->filter_info,
3097                                         &fdir_rule,
3098                                         sizeof(struct ixgbe_fdir_rule));
3099                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
3100                                         fdir_rule_ptr, entries);
3101                                 flow->rule = fdir_rule_ptr;
3102                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
3103
3104                                 return flow;
3105                         }
3106
3107                         if (ret) {
3108                                 /**
3109                                  * Clear the mask_added flag if programming
3110                                  * the filter fails.
3111                                  */
3112                                 if (first_mask)
3113                                         fdir_info->mask_added = FALSE;
3114                                 goto out;
3115                         }
3116                 }
3117
3118                 goto out;
3119         }
3120
3121         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3122         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3123                                         actions, &l2_tn_filter, error);
3124         if (!ret) {
3125                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3126                 if (!ret) {
3127                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3128                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3129                         if (!l2_tn_filter_ptr) {
3130                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3131                                 goto out;
3132                         }
3133                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3134                                 &l2_tn_filter,
3135                                 sizeof(struct rte_eth_l2_tunnel_conf));
3136                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3137                                 l2_tn_filter_ptr, entries);
3138                         flow->rule = l2_tn_filter_ptr;
3139                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3140                         return flow;
3141                 }
3142         }
3143
3144         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3145         ret = ixgbe_parse_rss_filter(dev, attr,
3146                                         actions, &rss_conf, error);
3147         if (!ret) {
3148                 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3149                 if (!ret) {
3150                         rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3151                                 sizeof(struct ixgbe_rss_conf_ele), 0);
3152                         if (!rss_filter_ptr) {
3153                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3154                                 goto out;
3155                         }
3156                         rte_memcpy(&rss_filter_ptr->filter_info,
3157                                 &rss_conf,
3158                                 sizeof(struct ixgbe_rte_flow_rss_conf));
3159                         TAILQ_INSERT_TAIL(&filter_rss_list,
3160                                 rss_filter_ptr, entries);
3161                         flow->rule = rss_filter_ptr;
3162                         flow->filter_type = RTE_ETH_FILTER_HASH;
3163                         return flow;
3164                 }
3165         }
3166
3167 out:
3168         TAILQ_REMOVE(&ixgbe_flow_list,
3169                 ixgbe_flow_mem_ptr, entries);
3170         rte_flow_error_set(error, -ret,
3171                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3172                            "Failed to create flow.");
3173         rte_free(ixgbe_flow_mem_ptr);
3174         rte_free(flow);
3175         return NULL;
3176 }
3177
3178 /**
3179  * Check if the flow rule is supported by ixgbe.
3180  * It only checks the format; it does not guarantee that the rule can be
3181  * programmed into the HW, because there may not be enough room for it.
3182  */
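/*
 * Illustrative sketch only (not part of the driver): a typical
 * application-side sequence, assuming port_id, attr, pattern and actions are
 * already set up:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f = NULL;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (f == NULL)
 *             printf("flow not created: %s\n",
 *                    err.message ? err.message : "unknown");
 */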
3183 static int
3184 ixgbe_flow_validate(struct rte_eth_dev *dev,
3185                 const struct rte_flow_attr *attr,
3186                 const struct rte_flow_item pattern[],
3187                 const struct rte_flow_action actions[],
3188                 struct rte_flow_error *error)
3189 {
3190         struct rte_eth_ntuple_filter ntuple_filter;
3191         struct rte_eth_ethertype_filter ethertype_filter;
3192         struct rte_eth_syn_filter syn_filter;
3193         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3194         struct ixgbe_fdir_rule fdir_rule;
3195         struct ixgbe_rte_flow_rss_conf rss_conf;
3196         int ret;
3197
3198         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3199         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3200                                 actions, &ntuple_filter, error);
3201         if (!ret)
3202                 return 0;
3203
3204         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3205         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3206                                 actions, &ethertype_filter, error);
3207         if (!ret)
3208                 return 0;
3209
3210         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3211         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3212                                 actions, &syn_filter, error);
3213         if (!ret)
3214                 return 0;
3215
3216         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3217         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3218                                 actions, &fdir_rule, error);
3219         if (!ret)
3220                 return 0;
3221
3222         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3223         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3224                                 actions, &l2_tn_filter, error);
3225         if (!ret)
3226                 return 0;
3227
3228         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3229         ret = ixgbe_parse_rss_filter(dev, attr,
3230                                         actions, &rss_conf, error);
3231
3232         return ret;
3233 }
3234
3235 /* Destroy a flow rule on ixgbe. */
3236 static int
3237 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3238                 struct rte_flow *flow,
3239                 struct rte_flow_error *error)
3240 {
3241         int ret;
3242         struct rte_flow *pmd_flow = flow;
3243         enum rte_filter_type filter_type = pmd_flow->filter_type;
3244         struct rte_eth_ntuple_filter ntuple_filter;
3245         struct rte_eth_ethertype_filter ethertype_filter;
3246         struct rte_eth_syn_filter syn_filter;
3247         struct ixgbe_fdir_rule fdir_rule;
3248         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3249         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3250         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3251         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3252         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3253         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3254         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3255         struct ixgbe_hw_fdir_info *fdir_info =
3256                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3257         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3258
3259         switch (filter_type) {
3260         case RTE_ETH_FILTER_NTUPLE:
3261                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3262                                         pmd_flow->rule;
3263                 rte_memcpy(&ntuple_filter,
3264                         &ntuple_filter_ptr->filter_info,
3265                         sizeof(struct rte_eth_ntuple_filter));
3266                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3267                 if (!ret) {
3268                         TAILQ_REMOVE(&filter_ntuple_list,
3269                         ntuple_filter_ptr, entries);
3270                         rte_free(ntuple_filter_ptr);
3271                 }
3272                 break;
3273         case RTE_ETH_FILTER_ETHERTYPE:
3274                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3275                                         pmd_flow->rule;
3276                 rte_memcpy(&ethertype_filter,
3277                         &ethertype_filter_ptr->filter_info,
3278                         sizeof(struct rte_eth_ethertype_filter));
3279                 ret = ixgbe_add_del_ethertype_filter(dev,
3280                                 &ethertype_filter, FALSE);
3281                 if (!ret) {
3282                         TAILQ_REMOVE(&filter_ethertype_list,
3283                                 ethertype_filter_ptr, entries);
3284                         rte_free(ethertype_filter_ptr);
3285                 }
3286                 break;
3287         case RTE_ETH_FILTER_SYN:
3288                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3289                                 pmd_flow->rule;
3290                 rte_memcpy(&syn_filter,
3291                         &syn_filter_ptr->filter_info,
3292                         sizeof(struct rte_eth_syn_filter));
3293                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3294                 if (!ret) {
3295                         TAILQ_REMOVE(&filter_syn_list,
3296                                 syn_filter_ptr, entries);
3297                         rte_free(syn_filter_ptr);
3298                 }
3299                 break;
3300         case RTE_ETH_FILTER_FDIR:
3301                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3302                 rte_memcpy(&fdir_rule,
3303                         &fdir_rule_ptr->filter_info,
3304                         sizeof(struct ixgbe_fdir_rule));
3305                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3306                 if (!ret) {
3307                         TAILQ_REMOVE(&filter_fdir_list,
3308                                 fdir_rule_ptr, entries);
3309                         rte_free(fdir_rule_ptr);
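                        /*
                         * Once the last fdir rule is gone, allow the next
                         * rule to install a fresh global mask.
                         */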
3310                         if (TAILQ_EMPTY(&filter_fdir_list))
3311                                 fdir_info->mask_added = false;
3312                 }
3313                 break;
3314         case RTE_ETH_FILTER_L2_TUNNEL:
3315                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3316                                 pmd_flow->rule;
3317                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3318                         sizeof(struct rte_eth_l2_tunnel_conf));
3319                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3320                 if (!ret) {
3321                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3322                                 l2_tn_filter_ptr, entries);
3323                         rte_free(l2_tn_filter_ptr);
3324                 }
3325                 break;
3326         case RTE_ETH_FILTER_HASH:
3327                 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3328                                 pmd_flow->rule;
3329                 ret = ixgbe_config_rss_filter(dev,
3330                                         &rss_filter_ptr->filter_info, FALSE);
3331                 if (!ret) {
3332                         TAILQ_REMOVE(&filter_rss_list,
3333                                 rss_filter_ptr, entries);
3334                         rte_free(rss_filter_ptr);
3335                 }
3336                 break;
3337         default:
3338                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3339                             filter_type);
3340                 ret = -EINVAL;
3341                 break;
3342         }
3343
3344         if (ret) {
3345                 rte_flow_error_set(error, EINVAL,
3346                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3347                                 NULL, "Failed to destroy flow");
3348                 return ret;
3349         }
3350
3351         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3352                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3353                         TAILQ_REMOVE(&ixgbe_flow_list,
3354                                 ixgbe_flow_mem_ptr, entries);
3355                         rte_free(ixgbe_flow_mem_ptr);
3356                 }
3357         }
3358         rte_free(flow);
3359
3360         return ret;
3361 }
3362
3363 /*  Destroy all flow rules associated with a port on ixgbe. */
3364 static int
3365 ixgbe_flow_flush(struct rte_eth_dev *dev,
3366                 struct rte_flow_error *error)
3367 {
3368         int ret = 0;
3369
3370         ixgbe_clear_all_ntuple_filter(dev);
3371         ixgbe_clear_all_ethertype_filter(dev);
3372         ixgbe_clear_syn_filter(dev);
3373
3374         ret = ixgbe_clear_all_fdir_filter(dev);
3375         if (ret < 0) {
3376                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3377                                         NULL, "Failed to flush rule");
3378                 return ret;
3379         }
3380
3381         ret = ixgbe_clear_all_l2_tn_filter(dev);
3382         if (ret < 0) {
3383                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3384                                         NULL, "Failed to flush rule");
3385                 return ret;
3386         }
3387
3388         ixgbe_clear_rss_filter(dev);
3389
3390         ixgbe_filterlist_flush();
3391
3392         return 0;
3393 }
3394
3395 const struct rte_flow_ops ixgbe_flow_ops = {
3396         .validate = ixgbe_flow_validate,
3397         .create = ixgbe_flow_create,
3398         .destroy = ixgbe_flow_destroy,
3399         .flush = ixgbe_flow_flush,
3400 };