dpdk.git: drivers/net/ixgbe/ixgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
31 #include <rte_dev.h>
32 #include <rte_hash_crc.h>
33 #include <rte_flow.h>
34 #include <rte_flow_driver.h>
35
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct rte_eth_l2_tunnel_conf filter_info;
76 };
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79         TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80         struct ixgbe_rte_flow_rss_conf filter_info;
81 };
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84         TAILQ_ENTRY(ixgbe_flow_mem) entries;
85         struct rte_flow *flow;
86 };
87
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
95
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
103
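/*
 * The lists above track every filter and every rte_flow allocation made
 * through this driver, so that flush/destroy can release them later.
 * A minimal sketch of how an entry is typically added (illustrative
 * only, error handling omitted; the real insertions happen further down
 * in this file):
 *
 *	struct ixgbe_flow_mem *flow_mem;
 *	struct rte_flow *flow;
 *
 *	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
 *	flow_mem = rte_zmalloc("ixgbe_flow_mem",
 *			       sizeof(struct ixgbe_flow_mem), 0);
 *	flow_mem->flow = flow;
 *	TAILQ_INSERT_TAIL(&ixgbe_flow_list, flow_mem, entries);
 */
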
104 /**
105  * An endless loop will never happen given the assumptions below:
106  * 1. there is at least one non-void item (END);
107  * 2. cur is before END.
108  */
109 static inline
110 const struct rte_flow_item *next_no_void_pattern(
111                 const struct rte_flow_item pattern[],
112                 const struct rte_flow_item *cur)
113 {
114         const struct rte_flow_item *next =
115                 cur ? cur + 1 : &pattern[0];
116         while (1) {
117                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
118                         return next;
119                 next++;
120         }
121 }
122
123 static inline
124 const struct rte_flow_action *next_no_void_action(
125                 const struct rte_flow_action actions[],
126                 const struct rte_flow_action *cur)
127 {
128         const struct rte_flow_action *next =
129                 cur ? cur + 1 : &actions[0];
130         while (1) {
131                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
132                         return next;
133                 next++;
134         }
135 }
136
137 /**
138  * Please be aware there's an assumption for all the parsers:
139  * rte_flow_item uses big endian, while rte_flow_attr and
140  * rte_flow_action use CPU (host) order.
141  * Because the pattern is used to describe packets,
142  * normally the packets should use network order.
143  */
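/*
 * For example (illustrative values only), a pattern item carries
 * network-order fields, while attributes and actions carry host-order
 * values:
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *
 * Here udp_spec.hdr.dst_port is big endian, while attr.priority and
 * queue.index are plain host-order integers.
 */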
144
145 /**
146  * Parse the rule to see if it is an n-tuple rule,
147  * and get the n-tuple filter info along the way.
148  * pattern:
149  * The first not void item can be ETH or IPV4.
150  * The second not void item must be IPV4 if the first one is ETH.
151  * The third not void item must be UDP, TCP or SCTP.
152  * The next not void item must be END.
153  * action:
154  * The first not void action should be QUEUE.
155  * The next not void action should be END.
156  * pattern example:
157  * ITEM         Spec                    Mask
158  * ETH          NULL                    NULL
159  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
160  *              dst_addr 192.167.3.50   0xFFFFFFFF
161  *              next_proto_id   17      0xFF
162  * UDP/TCP/     src_port        80      0xFFFF
163  * SCTP         dst_port        80      0xFFFF
164  * END
165  * other members in mask and spec should be set to 0x00.
166  * item->last should be NULL.
167  *
168  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
169  *
170  */
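/*
 * Illustrative usage sketch (not part of the driver; port_id, the queue
 * index, addresses and ports are made-up values): an application could
 * request an n-tuple rule matching the pattern documented above roughly
 * as follows.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *		.next_proto_id = IPPROTO_UDP,
 *	} };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = 0xFFFFFFFF,
 *		.dst_addr = 0xFFFFFFFF,
 *		.next_proto_id = 0xFF,
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80),
 *	} };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = 0xFFFF,
 *		.dst_port = 0xFFFF,
 *	} };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr,
 *						pattern, actions, &err);
 */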
171 static int
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173                          const struct rte_flow_item pattern[],
174                          const struct rte_flow_action actions[],
175                          struct rte_eth_ntuple_filter *filter,
176                          struct rte_flow_error *error)
177 {
178         const struct rte_flow_item *item;
179         const struct rte_flow_action *act;
180         const struct rte_flow_item_ipv4 *ipv4_spec;
181         const struct rte_flow_item_ipv4 *ipv4_mask;
182         const struct rte_flow_item_tcp *tcp_spec;
183         const struct rte_flow_item_tcp *tcp_mask;
184         const struct rte_flow_item_udp *udp_spec;
185         const struct rte_flow_item_udp *udp_mask;
186         const struct rte_flow_item_sctp *sctp_spec;
187         const struct rte_flow_item_sctp *sctp_mask;
188         const struct rte_flow_item_eth *eth_spec;
189         const struct rte_flow_item_eth *eth_mask;
190         const struct rte_flow_item_vlan *vlan_spec;
191         const struct rte_flow_item_vlan *vlan_mask;
192         struct rte_flow_item_eth eth_null;
193         struct rte_flow_item_vlan vlan_null;
194
195         if (!pattern) {
196                 rte_flow_error_set(error,
197                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198                         NULL, "NULL pattern.");
199                 return -rte_errno;
200         }
201
202         if (!actions) {
203                 rte_flow_error_set(error, EINVAL,
204                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205                                    NULL, "NULL action.");
206                 return -rte_errno;
207         }
208         if (!attr) {
209                 rte_flow_error_set(error, EINVAL,
210                                    RTE_FLOW_ERROR_TYPE_ATTR,
211                                    NULL, "NULL attribute.");
212                 return -rte_errno;
213         }
214
215         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
216         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
217
218 #ifdef RTE_LIBRTE_SECURITY
219         /**
220          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
221          */
222         act = next_no_void_action(actions, NULL);
223         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224                 const void *conf = act->conf;
225                 /* check if the next not void item is END */
226                 act = next_no_void_action(actions, act);
227                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229                         rte_flow_error_set(error, EINVAL,
230                                 RTE_FLOW_ERROR_TYPE_ACTION,
231                                 act, "Not supported action.");
232                         return -rte_errno;
233                 }
234
235                 /* get the IP pattern*/
236                 item = next_no_void_pattern(pattern, NULL);
237                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
239                         if (item->last ||
240                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
241                                 rte_flow_error_set(error, EINVAL,
242                                         RTE_FLOW_ERROR_TYPE_ITEM,
243                                         item, "IP pattern missing.");
244                                 return -rte_errno;
245                         }
246                         item = next_no_void_pattern(pattern, item);
247                 }
248
249                 filter->proto = IPPROTO_ESP;
250                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
252         }
253 #endif
254
255         /* the first not void item can be MAC or IPv4 */
256         item = next_no_void_pattern(pattern, NULL);
257
258         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260                 rte_flow_error_set(error, EINVAL,
261                         RTE_FLOW_ERROR_TYPE_ITEM,
262                         item, "Not supported by ntuple filter");
263                 return -rte_errno;
264         }
265         /* Skip Ethernet */
266         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267                 eth_spec = item->spec;
268                 eth_mask = item->mask;
269                 /*Not supported last point for range*/
270                 if (item->last) {
271                         rte_flow_error_set(error,
272                           EINVAL,
273                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                           item, "Not supported last point for range");
275                         return -rte_errno;
276
277                 }
278                 /* if the first item is MAC, the content should be NULL */
279                 if ((item->spec || item->mask) &&
280                         (memcmp(eth_spec, &eth_null,
281                                 sizeof(struct rte_flow_item_eth)) ||
282                          memcmp(eth_mask, &eth_null,
283                                 sizeof(struct rte_flow_item_eth)))) {
284                         rte_flow_error_set(error, EINVAL,
285                                 RTE_FLOW_ERROR_TYPE_ITEM,
286                                 item, "Not supported by ntuple filter");
287                         return -rte_errno;
288                 }
289                 /* check if the next not void item is IPv4 or Vlan */
290                 item = next_no_void_pattern(pattern, item);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293                         rte_flow_error_set(error,
294                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295                           item, "Not supported by ntuple filter");
296                           return -rte_errno;
297                 }
298         }
299
300         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301                 vlan_spec = item->spec;
302                 vlan_mask = item->mask;
303                 /*Not supported last point for range*/
304                 if (item->last) {
305                         rte_flow_error_set(error,
306                           EINVAL,
307                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308                           item, "Not supported last point for range");
309                         return -rte_errno;
310                 }
311                 /* the content should be NULL */
312                 if ((item->spec || item->mask) &&
313                         (memcmp(vlan_spec, &vlan_null,
314                                 sizeof(struct rte_flow_item_vlan)) ||
315                          memcmp(vlan_mask, &vlan_null,
316                                 sizeof(struct rte_flow_item_vlan)))) {
317
318                         rte_flow_error_set(error, EINVAL,
319                                 RTE_FLOW_ERROR_TYPE_ITEM,
320                                 item, "Not supported by ntuple filter");
321                         return -rte_errno;
322                 }
323                 /* check if the next not void item is IPv4 */
324                 item = next_no_void_pattern(pattern, item);
325                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326                         rte_flow_error_set(error,
327                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328                           item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331         }
332
333         if (item->mask) {
334                 /* get the IPv4 info */
335                 if (!item->spec || !item->mask) {
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Invalid ntuple mask");
339                         return -rte_errno;
340                 }
341                 /*Not supported last point for range*/
342                 if (item->last) {
343                         rte_flow_error_set(error, EINVAL,
344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345                                 item, "Not supported last point for range");
346                         return -rte_errno;
347                 }
348
349                 ipv4_mask = item->mask;
350                 /**
351                  * Only support src & dst addresses, protocol,
352                  * others should be masked.
353                  */
354                 if (ipv4_mask->hdr.version_ihl ||
355                     ipv4_mask->hdr.type_of_service ||
356                     ipv4_mask->hdr.total_length ||
357                     ipv4_mask->hdr.packet_id ||
358                     ipv4_mask->hdr.fragment_offset ||
359                     ipv4_mask->hdr.time_to_live ||
360                     ipv4_mask->hdr.hdr_checksum) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366
367                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
368                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
369                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
370
371                 ipv4_spec = item->spec;
372                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
373                 filter->src_ip = ipv4_spec->hdr.src_addr;
374                 filter->proto  = ipv4_spec->hdr.next_proto_id;
375         }
376
377         /* check if the next not void item is TCP, UDP, SCTP or END */
378         item = next_no_void_pattern(pattern, item);
379         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
380             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
381             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
382             item->type != RTE_FLOW_ITEM_TYPE_END) {
383                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
384                 rte_flow_error_set(error, EINVAL,
385                         RTE_FLOW_ERROR_TYPE_ITEM,
386                         item, "Not supported by ntuple filter");
387                 return -rte_errno;
388         }
389
390         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
391                 (!item->spec && !item->mask)) {
392                 goto action;
393         }
394
395         /* get the TCP/UDP/SCTP info */
396         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
397                 (!item->spec || !item->mask)) {
398                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
399                 rte_flow_error_set(error, EINVAL,
400                         RTE_FLOW_ERROR_TYPE_ITEM,
401                         item, "Invalid ntuple mask");
402                 return -rte_errno;
403         }
404
405         /*Not supported last point for range*/
406         if (item->last) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
410                         item, "Not supported last point for range");
411                 return -rte_errno;
412
413         }
414
415         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
416                 tcp_mask = item->mask;
417
418                 /**
419                  * Only support src & dst ports, tcp flags,
420                  * others should be masked.
421                  */
422                 if (tcp_mask->hdr.sent_seq ||
423                     tcp_mask->hdr.recv_ack ||
424                     tcp_mask->hdr.data_off ||
425                     tcp_mask->hdr.rx_win ||
426                     tcp_mask->hdr.cksum ||
427                     tcp_mask->hdr.tcp_urp) {
428                         memset(filter, 0,
429                                 sizeof(struct rte_eth_ntuple_filter));
430                         rte_flow_error_set(error, EINVAL,
431                                 RTE_FLOW_ERROR_TYPE_ITEM,
432                                 item, "Not supported by ntuple filter");
433                         return -rte_errno;
434                 }
435
436                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
437                 filter->src_port_mask  = tcp_mask->hdr.src_port;
438                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
439                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
440                 } else if (!tcp_mask->hdr.tcp_flags) {
441                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
442                 } else {
443                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
444                         rte_flow_error_set(error, EINVAL,
445                                 RTE_FLOW_ERROR_TYPE_ITEM,
446                                 item, "Not supported by ntuple filter");
447                         return -rte_errno;
448                 }
449
450                 tcp_spec = item->spec;
451                 filter->dst_port  = tcp_spec->hdr.dst_port;
452                 filter->src_port  = tcp_spec->hdr.src_port;
453                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
454         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
455                 udp_mask = item->mask;
456
457                 /**
458                  * Only support src & dst ports,
459                  * others should be masked.
460                  */
461                 if (udp_mask->hdr.dgram_len ||
462                     udp_mask->hdr.dgram_cksum) {
463                         memset(filter, 0,
464                                 sizeof(struct rte_eth_ntuple_filter));
465                         rte_flow_error_set(error, EINVAL,
466                                 RTE_FLOW_ERROR_TYPE_ITEM,
467                                 item, "Not supported by ntuple filter");
468                         return -rte_errno;
469                 }
470
471                 filter->dst_port_mask = udp_mask->hdr.dst_port;
472                 filter->src_port_mask = udp_mask->hdr.src_port;
473
474                 udp_spec = item->spec;
475                 filter->dst_port = udp_spec->hdr.dst_port;
476                 filter->src_port = udp_spec->hdr.src_port;
477         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
478                 sctp_mask = item->mask;
479
480                 /**
481                  * Only support src & dst ports,
482                  * others should be masked.
483                  */
484                 if (sctp_mask->hdr.tag ||
485                     sctp_mask->hdr.cksum) {
486                         memset(filter, 0,
487                                 sizeof(struct rte_eth_ntuple_filter));
488                         rte_flow_error_set(error, EINVAL,
489                                 RTE_FLOW_ERROR_TYPE_ITEM,
490                                 item, "Not supported by ntuple filter");
491                         return -rte_errno;
492                 }
493
494                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
495                 filter->src_port_mask = sctp_mask->hdr.src_port;
496
497                 sctp_spec = item->spec;
498                 filter->dst_port = sctp_spec->hdr.dst_port;
499                 filter->src_port = sctp_spec->hdr.src_port;
500         } else {
501                 goto action;
502         }
503
504         /* check if the next not void item is END */
505         item = next_no_void_pattern(pattern, item);
506         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
507                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
508                 rte_flow_error_set(error, EINVAL,
509                         RTE_FLOW_ERROR_TYPE_ITEM,
510                         item, "Not supported by ntuple filter");
511                 return -rte_errno;
512         }
513
514 action:
515
516         /**
517          * n-tuple only supports forwarding,
518          * check if the first not void action is QUEUE.
519          */
520         act = next_no_void_action(actions, NULL);
521         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
522                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
523                 rte_flow_error_set(error, EINVAL,
524                         RTE_FLOW_ERROR_TYPE_ACTION,
525                         item, "Not supported action.");
526                 return -rte_errno;
527         }
528         filter->queue =
529                 ((const struct rte_flow_action_queue *)act->conf)->index;
530
531         /* check if the next not void item is END */
532         act = next_no_void_action(actions, act);
533         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
534                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
535                 rte_flow_error_set(error, EINVAL,
536                         RTE_FLOW_ERROR_TYPE_ACTION,
537                         act, "Not supported action.");
538                 return -rte_errno;
539         }
540
541         /* parse attr */
542         /* must be input direction */
543         if (!attr->ingress) {
544                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
545                 rte_flow_error_set(error, EINVAL,
546                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
547                                    attr, "Only support ingress.");
548                 return -rte_errno;
549         }
550
551         /* not supported */
552         if (attr->egress) {
553                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
554                 rte_flow_error_set(error, EINVAL,
555                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
556                                    attr, "Not support egress.");
557                 return -rte_errno;
558         }
559
560         /* not supported */
561         if (attr->transfer) {
562                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
563                 rte_flow_error_set(error, EINVAL,
564                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
565                                    attr, "No support for transfer.");
566                 return -rte_errno;
567         }
568
569         if (attr->priority > 0xFFFF) {
570                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
571                 rte_flow_error_set(error, EINVAL,
572                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
573                                    attr, "Error priority.");
574                 return -rte_errno;
575         }
576         filter->priority = (uint16_t)attr->priority;
577         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
579             filter->priority = 1;
580
581         return 0;
582 }
583
584 /* a specific function for ixgbe because the flags are specific */
585 static int
586 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
587                           const struct rte_flow_attr *attr,
588                           const struct rte_flow_item pattern[],
589                           const struct rte_flow_action actions[],
590                           struct rte_eth_ntuple_filter *filter,
591                           struct rte_flow_error *error)
592 {
593         int ret;
594         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
595
596         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
597
598         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
599
600         if (ret)
601                 return ret;
602
603 #ifdef RTE_LIBRTE_SECURITY
604         /* an ESP flow is not really a flow */
605         if (filter->proto == IPPROTO_ESP)
606                 return 0;
607 #endif
608
609         /* Ixgbe doesn't support tcp flags. */
610         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
611                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
612                 rte_flow_error_set(error, EINVAL,
613                                    RTE_FLOW_ERROR_TYPE_ITEM,
614                                    NULL, "Not supported by ntuple filter");
615                 return -rte_errno;
616         }
617
618         /* Ixgbe supports only a limited priority range (1 to 7). */
619         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
620             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
621                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
622                 rte_flow_error_set(error, EINVAL,
623                         RTE_FLOW_ERROR_TYPE_ITEM,
624                         NULL, "Priority not supported by ntuple filter");
625                 return -rte_errno;
626         }
627
628         if (filter->queue >= dev->data->nb_rx_queues)
629                 return -rte_errno;
630
631         /* fixed value for ixgbe */
632         filter->flags = RTE_5TUPLE_FLAGS;
633         return 0;
634 }
635
636 /**
637  * Parse the rule to see if it is an ethertype rule,
638  * and get the ethertype filter info along the way.
639  * pattern:
640  * The first not void item must be ETH.
641  * The next not void item must be END.
642  * action:
643  * The first not void action should be QUEUE.
644  * The next not void action should be END.
645  * pattern example:
646  * ITEM         Spec                    Mask
647  * ETH          type    0x0807          0xFFFF
648  * END
649  * other members in mask and spec should be set to 0x00.
650  * item->last should be NULL.
651  */
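/*
 * Illustrative usage sketch (not part of the driver; the EtherType and
 * queue index are made-up values): a rule steering ARP frames
 * (EtherType 0x0806) to queue 2 could be requested roughly as follows.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0806),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */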
652 static int
653 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
654                             const struct rte_flow_item *pattern,
655                             const struct rte_flow_action *actions,
656                             struct rte_eth_ethertype_filter *filter,
657                             struct rte_flow_error *error)
658 {
659         const struct rte_flow_item *item;
660         const struct rte_flow_action *act;
661         const struct rte_flow_item_eth *eth_spec;
662         const struct rte_flow_item_eth *eth_mask;
663         const struct rte_flow_action_queue *act_q;
664
665         if (!pattern) {
666                 rte_flow_error_set(error, EINVAL,
667                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
668                                 NULL, "NULL pattern.");
669                 return -rte_errno;
670         }
671
672         if (!actions) {
673                 rte_flow_error_set(error, EINVAL,
674                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
675                                 NULL, "NULL action.");
676                 return -rte_errno;
677         }
678
679         if (!attr) {
680                 rte_flow_error_set(error, EINVAL,
681                                    RTE_FLOW_ERROR_TYPE_ATTR,
682                                    NULL, "NULL attribute.");
683                 return -rte_errno;
684         }
685
686         item = next_no_void_pattern(pattern, NULL);
687         /* The first non-void item should be MAC. */
688         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
689                 rte_flow_error_set(error, EINVAL,
690                         RTE_FLOW_ERROR_TYPE_ITEM,
691                         item, "Not supported by ethertype filter");
692                 return -rte_errno;
693         }
694
695         /*Not supported last point for range*/
696         if (item->last) {
697                 rte_flow_error_set(error, EINVAL,
698                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
699                         item, "Not supported last point for range");
700                 return -rte_errno;
701         }
702
703         /* Get the MAC info. */
704         if (!item->spec || !item->mask) {
705                 rte_flow_error_set(error, EINVAL,
706                                 RTE_FLOW_ERROR_TYPE_ITEM,
707                                 item, "Not supported by ethertype filter");
708                 return -rte_errno;
709         }
710
711         eth_spec = item->spec;
712         eth_mask = item->mask;
713
714         /* Mask bits of source MAC address must be full of 0.
715          * Mask bits of destination MAC address must be full
716          * of 1 or full of 0.
717          */
718         if (!is_zero_ether_addr(&eth_mask->src) ||
719             (!is_zero_ether_addr(&eth_mask->dst) &&
720              !is_broadcast_ether_addr(&eth_mask->dst))) {
721                 rte_flow_error_set(error, EINVAL,
722                                 RTE_FLOW_ERROR_TYPE_ITEM,
723                                 item, "Invalid ether address mask");
724                 return -rte_errno;
725         }
726
727         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
728                 rte_flow_error_set(error, EINVAL,
729                                 RTE_FLOW_ERROR_TYPE_ITEM,
730                                 item, "Invalid ethertype mask");
731                 return -rte_errno;
732         }
733
734         /* If mask bits of destination MAC address
735          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
736          */
737         if (is_broadcast_ether_addr(&eth_mask->dst)) {
738                 filter->mac_addr = eth_spec->dst;
739                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
740         } else {
741                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
742         }
743         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
744
745         /* Check if the next non-void item is END. */
746         item = next_no_void_pattern(pattern, item);
747         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
748                 rte_flow_error_set(error, EINVAL,
749                                 RTE_FLOW_ERROR_TYPE_ITEM,
750                                 item, "Not supported by ethertype filter.");
751                 return -rte_errno;
752         }
753
754         /* Parse action */
755
756         act = next_no_void_action(actions, NULL);
757         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
758             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
759                 rte_flow_error_set(error, EINVAL,
760                                 RTE_FLOW_ERROR_TYPE_ACTION,
761                                 act, "Not supported action.");
762                 return -rte_errno;
763         }
764
765         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
766                 act_q = (const struct rte_flow_action_queue *)act->conf;
767                 filter->queue = act_q->index;
768         } else {
769                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
770         }
771
772         /* Check if the next non-void item is END */
773         act = next_no_void_action(actions, act);
774         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
775                 rte_flow_error_set(error, EINVAL,
776                                 RTE_FLOW_ERROR_TYPE_ACTION,
777                                 act, "Not supported action.");
778                 return -rte_errno;
779         }
780
781         /* Parse attr */
782         /* Must be input direction */
783         if (!attr->ingress) {
784                 rte_flow_error_set(error, EINVAL,
785                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
786                                 attr, "Only support ingress.");
787                 return -rte_errno;
788         }
789
790         /* Not supported */
791         if (attr->egress) {
792                 rte_flow_error_set(error, EINVAL,
793                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
794                                 attr, "Not support egress.");
795                 return -rte_errno;
796         }
797
798         /* Not supported */
799         if (attr->transfer) {
800                 rte_flow_error_set(error, EINVAL,
801                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
802                                 attr, "No support for transfer.");
803                 return -rte_errno;
804         }
805
806         /* Not supported */
807         if (attr->priority) {
808                 rte_flow_error_set(error, EINVAL,
809                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
810                                 attr, "Not support priority.");
811                 return -rte_errno;
812         }
813
814         /* Not supported */
815         if (attr->group) {
816                 rte_flow_error_set(error, EINVAL,
817                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
818                                 attr, "Not support group.");
819                 return -rte_errno;
820         }
821
822         return 0;
823 }
824
825 static int
826 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
827                                  const struct rte_flow_attr *attr,
828                              const struct rte_flow_item pattern[],
829                              const struct rte_flow_action actions[],
830                              struct rte_eth_ethertype_filter *filter,
831                              struct rte_flow_error *error)
832 {
833         int ret;
834         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
835
836         MAC_TYPE_FILTER_SUP(hw->mac.type);
837
838         ret = cons_parse_ethertype_filter(attr, pattern,
839                                         actions, filter, error);
840
841         if (ret)
842                 return ret;
843
844         /* Ixgbe doesn't support MAC address matching. */
845         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
846                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
847                 rte_flow_error_set(error, EINVAL,
848                         RTE_FLOW_ERROR_TYPE_ITEM,
849                         NULL, "Not supported by ethertype filter");
850                 return -rte_errno;
851         }
852
853         if (filter->queue >= dev->data->nb_rx_queues) {
854                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
855                 rte_flow_error_set(error, EINVAL,
856                         RTE_FLOW_ERROR_TYPE_ITEM,
857                         NULL, "queue index much too big");
858                 return -rte_errno;
859         }
860
861         if (filter->ether_type == ETHER_TYPE_IPv4 ||
862                 filter->ether_type == ETHER_TYPE_IPv6) {
863                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
864                 rte_flow_error_set(error, EINVAL,
865                         RTE_FLOW_ERROR_TYPE_ITEM,
866                         NULL, "IPv4/IPv6 not supported by ethertype filter");
867                 return -rte_errno;
868         }
869
870         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
871                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
872                 rte_flow_error_set(error, EINVAL,
873                         RTE_FLOW_ERROR_TYPE_ITEM,
874                         NULL, "mac compare is unsupported");
875                 return -rte_errno;
876         }
877
878         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
879                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
880                 rte_flow_error_set(error, EINVAL,
881                         RTE_FLOW_ERROR_TYPE_ITEM,
882                         NULL, "drop option is unsupported");
883                 return -rte_errno;
884         }
885
886         return 0;
887 }
888
889 /**
890  * Parse the rule to see if it is a TCP SYN rule,
891  * and get the TCP SYN filter info along the way.
892  * pattern:
893  * The first not void item can be ETH, IPV4, IPV6 or TCP.
894  * If ETH is present, the next not void item must be IPV4 or IPV6.
895  * If IPV4/IPV6 is present, the next not void item must be TCP.
896  * The next not void item must be END.
897  * action:
898  * The first not void action should be QUEUE.
899  * The next not void action should be END.
900  * pattern example:
901  * ITEM         Spec                    Mask
902  * ETH          NULL                    NULL
903  * IPV4/IPV6    NULL                    NULL
904  * TCP          tcp_flags       0x02    0x02
905  * END
906  * other members in mask and spec should be set to 0x00.
907  * item->last should be NULL.
908  */
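/*
 * Illustrative usage sketch (not part of the driver; the queue index is
 * a made-up value).  attr.priority == 0 selects the low priority and
 * ~0U the high one; as enforced by the code below, the tcp_flags mask
 * must be exactly TCP_SYN_FLAG.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */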
909 static int
910 cons_parse_syn_filter(const struct rte_flow_attr *attr,
911                                 const struct rte_flow_item pattern[],
912                                 const struct rte_flow_action actions[],
913                                 struct rte_eth_syn_filter *filter,
914                                 struct rte_flow_error *error)
915 {
916         const struct rte_flow_item *item;
917         const struct rte_flow_action *act;
918         const struct rte_flow_item_tcp *tcp_spec;
919         const struct rte_flow_item_tcp *tcp_mask;
920         const struct rte_flow_action_queue *act_q;
921
922         if (!pattern) {
923                 rte_flow_error_set(error, EINVAL,
924                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
925                                 NULL, "NULL pattern.");
926                 return -rte_errno;
927         }
928
929         if (!actions) {
930                 rte_flow_error_set(error, EINVAL,
931                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
932                                 NULL, "NULL action.");
933                 return -rte_errno;
934         }
935
936         if (!attr) {
937                 rte_flow_error_set(error, EINVAL,
938                                    RTE_FLOW_ERROR_TYPE_ATTR,
939                                    NULL, "NULL attribute.");
940                 return -rte_errno;
941         }
942
943
944         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
945         item = next_no_void_pattern(pattern, NULL);
946         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
947             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
948             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
949             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
950                 rte_flow_error_set(error, EINVAL,
951                                 RTE_FLOW_ERROR_TYPE_ITEM,
952                                 item, "Not supported by syn filter");
953                 return -rte_errno;
954         }
955         /*Not supported last point for range*/
956         if (item->last) {
957                 rte_flow_error_set(error, EINVAL,
958                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
959                         item, "Not supported last point for range");
960                 return -rte_errno;
961         }
962
963         /* Skip Ethernet */
964         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
965                 /* if the item is MAC, the content should be NULL */
966                 if (item->spec || item->mask) {
967                         rte_flow_error_set(error, EINVAL,
968                                 RTE_FLOW_ERROR_TYPE_ITEM,
969                                 item, "Invalid SYN address mask");
970                         return -rte_errno;
971                 }
972
973                 /* check if the next not void item is IPv4 or IPv6 */
974                 item = next_no_void_pattern(pattern, item);
975                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
976                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
977                         rte_flow_error_set(error, EINVAL,
978                                 RTE_FLOW_ERROR_TYPE_ITEM,
979                                 item, "Not supported by syn filter");
980                         return -rte_errno;
981                 }
982         }
983
984         /* Skip IP */
985         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
986             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
987                 /* if the item is IP, the content should be NULL */
988                 if (item->spec || item->mask) {
989                         rte_flow_error_set(error, EINVAL,
990                                 RTE_FLOW_ERROR_TYPE_ITEM,
991                                 item, "Invalid SYN mask");
992                         return -rte_errno;
993                 }
994
995                 /* check if the next not void item is TCP */
996                 item = next_no_void_pattern(pattern, item);
997                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
998                         rte_flow_error_set(error, EINVAL,
999                                 RTE_FLOW_ERROR_TYPE_ITEM,
1000                                 item, "Not supported by syn filter");
1001                         return -rte_errno;
1002                 }
1003         }
1004
1005         /* Get the TCP info. Only support SYN. */
1006         if (!item->spec || !item->mask) {
1007                 rte_flow_error_set(error, EINVAL,
1008                                 RTE_FLOW_ERROR_TYPE_ITEM,
1009                                 item, "Invalid SYN mask");
1010                 return -rte_errno;
1011         }
1012         /*Not supported last point for range*/
1013         if (item->last) {
1014                 rte_flow_error_set(error, EINVAL,
1015                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1016                         item, "Not supported last point for range");
1017                 return -rte_errno;
1018         }
1019
1020         tcp_spec = item->spec;
1021         tcp_mask = item->mask;
1022         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
1023             tcp_mask->hdr.src_port ||
1024             tcp_mask->hdr.dst_port ||
1025             tcp_mask->hdr.sent_seq ||
1026             tcp_mask->hdr.recv_ack ||
1027             tcp_mask->hdr.data_off ||
1028             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
1029             tcp_mask->hdr.rx_win ||
1030             tcp_mask->hdr.cksum ||
1031             tcp_mask->hdr.tcp_urp) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                                 RTE_FLOW_ERROR_TYPE_ITEM,
1035                                 item, "Not supported by syn filter");
1036                 return -rte_errno;
1037         }
1038
1039         /* check if the next not void item is END */
1040         item = next_no_void_pattern(pattern, item);
1041         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1042                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1043                 rte_flow_error_set(error, EINVAL,
1044                                 RTE_FLOW_ERROR_TYPE_ITEM,
1045                                 item, "Not supported by syn filter");
1046                 return -rte_errno;
1047         }
1048
1049         /* check if the first not void action is QUEUE. */
1050         act = next_no_void_action(actions, NULL);
1051         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1052                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1053                 rte_flow_error_set(error, EINVAL,
1054                                 RTE_FLOW_ERROR_TYPE_ACTION,
1055                                 act, "Not supported action.");
1056                 return -rte_errno;
1057         }
1058
1059         act_q = (const struct rte_flow_action_queue *)act->conf;
1060         filter->queue = act_q->index;
1061         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1062                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1063                 rte_flow_error_set(error, EINVAL,
1064                                 RTE_FLOW_ERROR_TYPE_ACTION,
1065                                 act, "Not supported action.");
1066                 return -rte_errno;
1067         }
1068
1069         /* check if the next not void item is END */
1070         act = next_no_void_action(actions, act);
1071         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1072                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1073                 rte_flow_error_set(error, EINVAL,
1074                                 RTE_FLOW_ERROR_TYPE_ACTION,
1075                                 act, "Not supported action.");
1076                 return -rte_errno;
1077         }
1078
1079         /* parse attr */
1080         /* must be input direction */
1081         if (!attr->ingress) {
1082                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1083                 rte_flow_error_set(error, EINVAL,
1084                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1085                         attr, "Only support ingress.");
1086                 return -rte_errno;
1087         }
1088
1089         /* not supported */
1090         if (attr->egress) {
1091                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1092                 rte_flow_error_set(error, EINVAL,
1093                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1094                         attr, "Not support egress.");
1095                 return -rte_errno;
1096         }
1097
1098         /* not supported */
1099         if (attr->transfer) {
1100                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1101                 rte_flow_error_set(error, EINVAL,
1102                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1103                         attr, "No support for transfer.");
1104                 return -rte_errno;
1105         }
1106
1107         /* Support 2 priorities, the lowest or highest. */
1108         if (!attr->priority) {
1109                 filter->hig_pri = 0;
1110         } else if (attr->priority == (uint32_t)~0U) {
1111                 filter->hig_pri = 1;
1112         } else {
1113                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1116                         attr, "Not support priority.");
1117                 return -rte_errno;
1118         }
1119
1120         return 0;
1121 }
1122
1123 static int
1124 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1125                                  const struct rte_flow_attr *attr,
1126                              const struct rte_flow_item pattern[],
1127                              const struct rte_flow_action actions[],
1128                              struct rte_eth_syn_filter *filter,
1129                              struct rte_flow_error *error)
1130 {
1131         int ret;
1132         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1133
1134         MAC_TYPE_FILTER_SUP(hw->mac.type);
1135
1136         ret = cons_parse_syn_filter(attr, pattern,
1137                                         actions, filter, error);
1138
1139         if (filter->queue >= dev->data->nb_rx_queues)
1140                 return -rte_errno;
1141         if (ret)
1142                 return ret;
1143
1144         if (filter->queue >= dev->data->nb_rx_queues)
1145                 return -rte_errno;
1146 }
1147
1148 /**
1149  * Parse the rule to see if it is an L2 tunnel rule,
1150  * and get the L2 tunnel filter info along the way.
1151  * Only E-tag is supported now.
1152  * pattern:
1153  * The first not void item must be E_TAG.
1154  * The next not void item must be END.
1155  * action:
1156  * The first not void action should be VF or PF.
1157  * The next not void action should be END.
1158  * pattern example:
1159  * ITEM         Spec                    Mask
1160  * E_TAG        grp             0x1     0x3
1161  *              e_cid_base      0x309   0xFFF
1162  * END
1163  * other members in mask and spec should be set to 0x00.
1164  * item->last should be NULL.
1165  */
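/*
 * Illustrative usage sketch (not part of the driver; the GRP, E-CID base
 * and VF id are made-up values).  GRP occupies bits 13:12 and the E-CID
 * base bits 11:0 of rsvd_grp_ecid_b, and the mask must be 0x3FFF:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */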
1166 static int
1167 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1168                         const struct rte_flow_attr *attr,
1169                         const struct rte_flow_item pattern[],
1170                         const struct rte_flow_action actions[],
1171                         struct rte_eth_l2_tunnel_conf *filter,
1172                         struct rte_flow_error *error)
1173 {
1174         const struct rte_flow_item *item;
1175         const struct rte_flow_item_e_tag *e_tag_spec;
1176         const struct rte_flow_item_e_tag *e_tag_mask;
1177         const struct rte_flow_action *act;
1178         const struct rte_flow_action_vf *act_vf;
1179         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1180
1181         if (!pattern) {
1182                 rte_flow_error_set(error, EINVAL,
1183                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1184                         NULL, "NULL pattern.");
1185                 return -rte_errno;
1186         }
1187
1188         if (!actions) {
1189                 rte_flow_error_set(error, EINVAL,
1190                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1191                                    NULL, "NULL action.");
1192                 return -rte_errno;
1193         }
1194
1195         if (!attr) {
1196                 rte_flow_error_set(error, EINVAL,
1197                                    RTE_FLOW_ERROR_TYPE_ATTR,
1198                                    NULL, "NULL attribute.");
1199                 return -rte_errno;
1200         }
1201
1202         /* The first not void item should be e-tag. */
1203         item = next_no_void_pattern(pattern, NULL);
1204         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1205                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1206                 rte_flow_error_set(error, EINVAL,
1207                         RTE_FLOW_ERROR_TYPE_ITEM,
1208                         item, "Not supported by L2 tunnel filter");
1209                 return -rte_errno;
1210         }
1211
1212         if (!item->spec || !item->mask) {
1213                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1214                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1215                         item, "Not supported by L2 tunnel filter");
1216                 return -rte_errno;
1217         }
1218
1219         /*Not supported last point for range*/
1220         if (item->last) {
1221                 rte_flow_error_set(error, EINVAL,
1222                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1223                         item, "Not supported last point for range");
1224                 return -rte_errno;
1225         }
1226
1227         e_tag_spec = item->spec;
1228         e_tag_mask = item->mask;
1229
1230         /* Only care about GRP and E cid base. */
1231         if (e_tag_mask->epcp_edei_in_ecid_b ||
1232             e_tag_mask->in_ecid_e ||
1233             e_tag_mask->ecid_e ||
1234             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1235                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1236                 rte_flow_error_set(error, EINVAL,
1237                         RTE_FLOW_ERROR_TYPE_ITEM,
1238                         item, "Not supported by L2 tunnel filter");
1239                 return -rte_errno;
1240         }
1241
1242         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1243         /**
1244          * grp and e_cid_base are bit fields and only use 14 bits.
1245          * e-tag id is taken as little endian by HW.
1246          */
1247         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1248
1249         /* check if the next not void item is END */
1250         item = next_no_void_pattern(pattern, item);
1251         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1252                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1253                 rte_flow_error_set(error, EINVAL,
1254                         RTE_FLOW_ERROR_TYPE_ITEM,
1255                         item, "Not supported by L2 tunnel filter");
1256                 return -rte_errno;
1257         }
1258
1259         /* parse attr */
1260         /* must be input direction */
1261         if (!attr->ingress) {
1262                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1263                 rte_flow_error_set(error, EINVAL,
1264                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1265                         attr, "Only support ingress.");
1266                 return -rte_errno;
1267         }
1268
1269         /* not supported */
1270         if (attr->egress) {
1271                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1272                 rte_flow_error_set(error, EINVAL,
1273                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1274                         attr, "Not support egress.");
1275                 return -rte_errno;
1276         }
1277
1278         /* not supported */
1279         if (attr->transfer) {
1280                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1281                 rte_flow_error_set(error, EINVAL,
1282                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1283                         attr, "No support for transfer.");
1284                 return -rte_errno;
1285         }
1286
1287         /* not supported */
1288         if (attr->priority) {
1289                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1290                 rte_flow_error_set(error, EINVAL,
1291                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1292                         attr, "Not support priority.");
1293                 return -rte_errno;
1294         }
1295
1296         /* check if the first not void action is VF or PF. */
1297         act = next_no_void_action(actions, NULL);
1298         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1299                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1300                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1301                 rte_flow_error_set(error, EINVAL,
1302                         RTE_FLOW_ERROR_TYPE_ACTION,
1303                         act, "Not supported action.");
1304                 return -rte_errno;
1305         }
1306
1307         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1308                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1309                 filter->pool = act_vf->id;
1310         } else {
1311                 filter->pool = pci_dev->max_vfs;
1312         }
1313
1314         /* check if the next not void item is END */
1315         act = next_no_void_action(actions, act);
1316         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1317                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1318                 rte_flow_error_set(error, EINVAL,
1319                         RTE_FLOW_ERROR_TYPE_ACTION,
1320                         act, "Not supported action.");
1321                 return -rte_errno;
1322         }
1323
1324         return 0;
1325 }
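
/*
 * Illustrative application-side sketch (not part of the driver) of an
 * E-tag rule of the shape accepted above: an E_TAG item followed by END,
 * and a VF (or PF) action followed by END, with an ingress-only attribute.
 * The port id 0, the GRP/E-CID value and the VF id 1 are arbitrary example
 * values; <rte_flow.h> is assumed to be included by the application.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = RTE_BE16(0x1234),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = RTE_BE16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */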
1326
1327 static int
1328 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1329                         const struct rte_flow_attr *attr,
1330                         const struct rte_flow_item pattern[],
1331                         const struct rte_flow_action actions[],
1332                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1333                         struct rte_flow_error *error)
1334 {
1335         int ret = 0;
1336         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1338         uint16_t vf_num;
1339
1340         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1341                                 actions, l2_tn_filter, error);
1342
1343         if (hw->mac.type != ixgbe_mac_X550 &&
1344                 hw->mac.type != ixgbe_mac_X550EM_x &&
1345                 hw->mac.type != ixgbe_mac_X550EM_a) {
1346                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1347                 rte_flow_error_set(error, EINVAL,
1348                         RTE_FLOW_ERROR_TYPE_ITEM,
1349                         NULL, "Not supported by L2 tunnel filter");
1350                 return -rte_errno;
1351         }
1352
1353         vf_num = pci_dev->max_vfs;
1354
1355         if (l2_tn_filter->pool > vf_num)
1356                 return -rte_errno;
1357
1358         return ret;
1359 }
1360
1361 /* Parse to get the attr and action info of a flow director rule. */
1362 static int
1363 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1364                           const struct rte_flow_action actions[],
1365                           struct ixgbe_fdir_rule *rule,
1366                           struct rte_flow_error *error)
1367 {
1368         const struct rte_flow_action *act;
1369         const struct rte_flow_action_queue *act_q;
1370         const struct rte_flow_action_mark *mark;
1371
1372         /* parse attr */
1373         /* must be input direction */
1374         if (!attr->ingress) {
1375                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1376                 rte_flow_error_set(error, EINVAL,
1377                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1378                         attr, "Only support ingress.");
1379                 return -rte_errno;
1380         }
1381
1382         /* not supported */
1383         if (attr->egress) {
1384                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1385                 rte_flow_error_set(error, EINVAL,
1386                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1387                         attr, "Not support egress.");
1388                 return -rte_errno;
1389         }
1390
1391         /* not supported */
1392         if (attr->transfer) {
1393                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1394                 rte_flow_error_set(error, EINVAL,
1395                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1396                         attr, "No support for transfer.");
1397                 return -rte_errno;
1398         }
1399
1400         /* not supported */
1401         if (attr->priority) {
1402                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1403                 rte_flow_error_set(error, EINVAL,
1404                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1405                         attr, "Not support priority.");
1406                 return -rte_errno;
1407         }
1408
1409         /* check if the first not void action is QUEUE or DROP. */
1410         act = next_no_void_action(actions, NULL);
1411         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1412             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1413                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414                 rte_flow_error_set(error, EINVAL,
1415                         RTE_FLOW_ERROR_TYPE_ACTION,
1416                         act, "Not supported action.");
1417                 return -rte_errno;
1418         }
1419
1420         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1421                 act_q = (const struct rte_flow_action_queue *)act->conf;
1422                 rule->queue = act_q->index;
1423         } else { /* drop */
1424                 /* signature mode does not support drop action. */
1425                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1426                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1427                         rte_flow_error_set(error, EINVAL,
1428                                 RTE_FLOW_ERROR_TYPE_ACTION,
1429                                 act, "Not supported action.");
1430                         return -rte_errno;
1431                 }
1432                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1433         }
1434
1435         /* check if the next not void action is MARK or END */
1436         act = next_no_void_action(actions, act);
1437         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1438                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1439                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1440                 rte_flow_error_set(error, EINVAL,
1441                         RTE_FLOW_ERROR_TYPE_ACTION,
1442                         act, "Not supported action.");
1443                 return -rte_errno;
1444         }
1445
1446         rule->soft_id = 0;
1447
1448         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1449                 mark = (const struct rte_flow_action_mark *)act->conf;
1450                 rule->soft_id = mark->id;
1451                 act = next_no_void_action(actions, act);
1452         }
1453
1454         /* check if the next not void action is END */
1455         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1456                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1457                 rte_flow_error_set(error, EINVAL,
1458                         RTE_FLOW_ERROR_TYPE_ACTION,
1459                         act, "Not supported action.");
1460                 return -rte_errno;
1461         }
1462
1463         return 0;
1464 }
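
/*
 * Illustrative application-side sketch (not part of the driver) of an
 * action list accepted by ixgbe_parse_fdir_act_attr(): a QUEUE (or DROP)
 * action, an optional MARK action, then END. The queue index 3 and mark
 * id 0x1234 are arbitrary example values.
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * With this list the parser sets rule->queue = 3 and rule->soft_id = 0x1234.
 */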
1465
1466 /* search the next not void pattern item, skipping FUZZY items */
1467 static inline
1468 const struct rte_flow_item *next_no_fuzzy_pattern(
1469                 const struct rte_flow_item pattern[],
1470                 const struct rte_flow_item *cur)
1471 {
1472         const struct rte_flow_item *next =
1473                 next_no_void_pattern(pattern, cur);
1474         while (1) {
1475                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1476                         return next;
1477                 next = next_no_void_pattern(pattern, next);
1478         }
1479 }
1480
1481 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1482 {
1483         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1484         const struct rte_flow_item *item;
1485         uint32_t sh, lh, mh;
1486         int i = 0;
1487
1488         while (1) {
1489                 item = pattern + i;
1490                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1491                         break;
1492
1493                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1494                         spec = item->spec;
1495                         last = item->last;
1496                         mask = item->mask;
1497
1498                         if (!spec || !mask)
1499                                 return 0;
1500
1501                         sh = spec->thresh;
1502
1503                         if (!last)
1504                                 lh = sh;
1505                         else
1506                                 lh = last->thresh;
1507
1508                         mh = mask->thresh;
1509                         sh = sh & mh;
1510                         lh = lh & mh;
1511
1512                         if (!sh || sh > lh)
1513                                 return 0;
1514
1515                         return 1;
1516                 }
1517
1518                 i++;
1519         }
1520
1521         return 0;
1522 }
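
/*
 * Illustrative sketch (not part of the driver) of a FUZZY item that makes
 * signature_match() return 1 and thus selects signature mode: any non-zero
 * threshold that survives the mask is enough. The threshold value 1 is an
 * arbitrary example; item->last may be NULL, in which case the spec
 * threshold is used as the upper bound.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */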
1523
1524 /**
1525  * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1526  * The flow director filter info is filled in along the way.
1527  * UDP/TCP/SCTP PATTERN:
1528  * The first not void item can be ETH or IPV4 or IPV6
1529  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1530  * The next not void item could be UDP or TCP or SCTP (optional)
1531  * The next not void item could be RAW (for flexbyte, optional)
1532  * The next not void item must be END.
1533  * A Fuzzy Match pattern can appear at any place before END.
1534  * Fuzzy Match is optional for IPV4 but is required for IPV6
1535  * MAC VLAN PATTERN:
1536  * The first not void item must be ETH.
1537  * The second not void item must be MAC VLAN.
1538  * The next not void item must be END.
1539  * ACTION:
1540  * The first not void action should be QUEUE or DROP.
1541  * The second not void optional action should be MARK,
1542  * mark_id is a uint32_t number.
1543  * The next not void action should be END.
1544  * UDP/TCP/SCTP pattern example:
1545  * ITEM         Spec                    Mask
1546  * ETH          NULL                    NULL
1547  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1548  *              dst_addr 192.167.3.50   0xFFFFFFFF
1549  * UDP/TCP/SCTP src_port        80      0xFFFF
1550  *              dst_port        80      0xFFFF
1551  * FLEX relative        0       0x1
1552  *              search          0       0x1
1553  *              reserved        0       0
1554  *              offset          12      0xFFFFFFFF
1555  *              limit           0       0xFFFF
1556  *              length          2       0xFFFF
1557  *              pattern[0]      0x86    0xFF
1558  *              pattern[1]      0xDD    0xFF
1559  * END
1560  * MAC VLAN pattern example:
1561  * ITEM         Spec                    Mask
1562  * ETH          dst_addr
1563                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1564                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1565  * MAC VLAN     tci     0x2016          0xEFFF
1566  * END
1567  * Other members in mask and spec should be set to 0x00.
1568  * Item->last should be NULL.
1569  */
1570 static int
1571 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1572                                const struct rte_flow_attr *attr,
1573                                const struct rte_flow_item pattern[],
1574                                const struct rte_flow_action actions[],
1575                                struct ixgbe_fdir_rule *rule,
1576                                struct rte_flow_error *error)
1577 {
1578         const struct rte_flow_item *item;
1579         const struct rte_flow_item_eth *eth_spec;
1580         const struct rte_flow_item_eth *eth_mask;
1581         const struct rte_flow_item_ipv4 *ipv4_spec;
1582         const struct rte_flow_item_ipv4 *ipv4_mask;
1583         const struct rte_flow_item_ipv6 *ipv6_spec;
1584         const struct rte_flow_item_ipv6 *ipv6_mask;
1585         const struct rte_flow_item_tcp *tcp_spec;
1586         const struct rte_flow_item_tcp *tcp_mask;
1587         const struct rte_flow_item_udp *udp_spec;
1588         const struct rte_flow_item_udp *udp_mask;
1589         const struct rte_flow_item_sctp *sctp_spec;
1590         const struct rte_flow_item_sctp *sctp_mask;
1591         const struct rte_flow_item_vlan *vlan_spec;
1592         const struct rte_flow_item_vlan *vlan_mask;
1593         const struct rte_flow_item_raw *raw_mask;
1594         const struct rte_flow_item_raw *raw_spec;
1595         uint8_t j;
1596
1597         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598
1599         if (!pattern) {
1600                 rte_flow_error_set(error, EINVAL,
1601                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1602                         NULL, "NULL pattern.");
1603                 return -rte_errno;
1604         }
1605
1606         if (!actions) {
1607                 rte_flow_error_set(error, EINVAL,
1608                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1609                                    NULL, "NULL action.");
1610                 return -rte_errno;
1611         }
1612
1613         if (!attr) {
1614                 rte_flow_error_set(error, EINVAL,
1615                                    RTE_FLOW_ERROR_TYPE_ATTR,
1616                                    NULL, "NULL attribute.");
1617                 return -rte_errno;
1618         }
1619
1620         /**
1621          * Some fields may not be provided. Set spec to 0 and mask to default
1622          * value, so we need not do anything for the unspecified fields later.
1623          */
1624         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1625         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1626         rule->mask.vlan_tci_mask = 0;
1627         rule->mask.flex_bytes_mask = 0;
1628
1629         /**
1630          * The first not void item should be
1631          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1632          */
1633         item = next_no_fuzzy_pattern(pattern, NULL);
1634         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1635             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1636             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1637             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1638             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1639             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1640                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1641                 rte_flow_error_set(error, EINVAL,
1642                         RTE_FLOW_ERROR_TYPE_ITEM,
1643                         item, "Not supported by fdir filter");
1644                 return -rte_errno;
1645         }
1646
1647         if (signature_match(pattern))
1648                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1649         else
1650                 rule->mode = RTE_FDIR_MODE_PERFECT;
1651
1652         /* Not supported last point for range */
1653         if (item->last) {
1654                 rte_flow_error_set(error, EINVAL,
1655                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1656                         item, "Not supported last point for range");
1657                 return -rte_errno;
1658         }
1659
1660         /* Get the MAC info. */
1661         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1662                 /**
1663                  * Only VLAN and the dst MAC address are supported;
1664                  * others should be masked.
1665                  */
1666                 if (item->spec && !item->mask) {
1667                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1668                         rte_flow_error_set(error, EINVAL,
1669                                 RTE_FLOW_ERROR_TYPE_ITEM,
1670                                 item, "Not supported by fdir filter");
1671                         return -rte_errno;
1672                 }
1673
1674                 if (item->spec) {
1675                         rule->b_spec = TRUE;
1676                         eth_spec = item->spec;
1677
1678                         /* Get the dst MAC. */
1679                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1680                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1681                                         eth_spec->dst.addr_bytes[j];
1682                         }
1683                 }
1684
1685
1686                 if (item->mask) {
1687
1688                         rule->b_mask = TRUE;
1689                         eth_mask = item->mask;
1690
1691                         /* Ether type should be masked. */
1692                         if (eth_mask->type ||
1693                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1694                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1695                                 rte_flow_error_set(error, EINVAL,
1696                                         RTE_FLOW_ERROR_TYPE_ITEM,
1697                                         item, "Not supported by fdir filter");
1698                                 return -rte_errno;
1699                         }
1700
1701                         /* If the Ethernet item is meaningful, this is MAC VLAN mode. */
1702                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1703
1704                         /**
1705                          * The src MAC address mask must be all zeroes,
1706                          * and the dst MAC address mask must be all ones.
1707                          */
1708                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1709                                 if (eth_mask->src.addr_bytes[j] ||
1710                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1711                                         memset(rule, 0,
1712                                         sizeof(struct ixgbe_fdir_rule));
1713                                         rte_flow_error_set(error, EINVAL,
1714                                         RTE_FLOW_ERROR_TYPE_ITEM,
1715                                         item, "Not supported by fdir filter");
1716                                         return -rte_errno;
1717                                 }
1718                         }
1719
1720                         /* When there is no VLAN, it is considered a full mask. */
1721                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1722                 }
1723                 /** If both spec and mask are NULL,
1724                  * it means we don't care about ETH.
1725                  * Do nothing.
1726                  */
1727
1728                 /**
1729                  * Check if the next not void item is vlan or ipv4.
1730                  * Check if the next not void item is VLAN or IPv4.
1731                  */
1732                 item = next_no_fuzzy_pattern(pattern, item);
1733                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1734                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1735                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1736                                 rte_flow_error_set(error, EINVAL,
1737                                         RTE_FLOW_ERROR_TYPE_ITEM,
1738                                         item, "Not supported by fdir filter");
1739                                 return -rte_errno;
1740                         }
1741                 } else {
1742                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1743                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1744                                 rte_flow_error_set(error, EINVAL,
1745                                         RTE_FLOW_ERROR_TYPE_ITEM,
1746                                         item, "Not supported by fdir filter");
1747                                 return -rte_errno;
1748                         }
1749                 }
1750         }
1751
1752         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1753                 if (!(item->spec && item->mask)) {
1754                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1755                         rte_flow_error_set(error, EINVAL,
1756                                 RTE_FLOW_ERROR_TYPE_ITEM,
1757                                 item, "Not supported by fdir filter");
1758                         return -rte_errno;
1759                 }
1760
1761                 /* Not supported last point for range */
1762                 if (item->last) {
1763                         rte_flow_error_set(error, EINVAL,
1764                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1765                                 item, "Not supported last point for range");
1766                         return -rte_errno;
1767                 }
1768
1769                 vlan_spec = item->spec;
1770                 vlan_mask = item->mask;
1771
1772                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1773
1774                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1775                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1776                 /* More than one tag is not supported. */
1777
1778                 /* Next not void item must be END */
1779                 item = next_no_fuzzy_pattern(pattern, item);
1780                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1781                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_ITEM,
1784                                 item, "Not supported by fdir filter");
1785                         return -rte_errno;
1786                 }
1787         }
1788
1789         /* Get the IPV4 info. */
1790         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1791                 /**
1792                  * Set the flow type even if there's no content
1793                  * as we must have a flow type.
1794                  */
1795                 rule->ixgbe_fdir.formatted.flow_type =
1796                         IXGBE_ATR_FLOW_TYPE_IPV4;
1797                 /* Not supported last point for range */
1798                 if (item->last) {
1799                         rte_flow_error_set(error, EINVAL,
1800                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1801                                 item, "Not supported last point for range");
1802                         return -rte_errno;
1803                 }
1804                 /**
1805                  * Only care about src & dst addresses,
1806                  * others should be masked.
1807                  */
1808                 if (!item->mask) {
1809                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1810                         rte_flow_error_set(error, EINVAL,
1811                                 RTE_FLOW_ERROR_TYPE_ITEM,
1812                                 item, "Not supported by fdir filter");
1813                         return -rte_errno;
1814                 }
1815                 rule->b_mask = TRUE;
1816                 ipv4_mask = item->mask;
1817                 if (ipv4_mask->hdr.version_ihl ||
1818                     ipv4_mask->hdr.type_of_service ||
1819                     ipv4_mask->hdr.total_length ||
1820                     ipv4_mask->hdr.packet_id ||
1821                     ipv4_mask->hdr.fragment_offset ||
1822                     ipv4_mask->hdr.time_to_live ||
1823                     ipv4_mask->hdr.next_proto_id ||
1824                     ipv4_mask->hdr.hdr_checksum) {
1825                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1826                         rte_flow_error_set(error, EINVAL,
1827                                 RTE_FLOW_ERROR_TYPE_ITEM,
1828                                 item, "Not supported by fdir filter");
1829                         return -rte_errno;
1830                 }
1831                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1832                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1833
1834                 if (item->spec) {
1835                         rule->b_spec = TRUE;
1836                         ipv4_spec = item->spec;
1837                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1838                                 ipv4_spec->hdr.dst_addr;
1839                         rule->ixgbe_fdir.formatted.src_ip[0] =
1840                                 ipv4_spec->hdr.src_addr;
1841                 }
1842
1843                 /**
1844                  * Check if the next not void item is
1845                  * TCP or UDP or SCTP or END.
1846                  */
1847                 item = next_no_fuzzy_pattern(pattern, item);
1848                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1849                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1850                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1851                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1852                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1853                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1854                         rte_flow_error_set(error, EINVAL,
1855                                 RTE_FLOW_ERROR_TYPE_ITEM,
1856                                 item, "Not supported by fdir filter");
1857                         return -rte_errno;
1858                 }
1859         }
1860
1861         /* Get the IPV6 info. */
1862         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1863                 /**
1864                  * Set the flow type even if there's no content
1865                  * as we must have a flow type.
1866                  */
1867                 rule->ixgbe_fdir.formatted.flow_type =
1868                         IXGBE_ATR_FLOW_TYPE_IPV6;
1869
1870                 /**
1871                  * 1. must be a signature match
1872                  * 2. last is not supported
1873                  * 3. mask must not be NULL
1874                  */
1875                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1876                     item->last ||
1877                     !item->mask) {
1878                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1879                         rte_flow_error_set(error, EINVAL,
1880                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1881                                 item, "Not supported last point for range");
1882                         return -rte_errno;
1883                 }
1884
1885                 rule->b_mask = TRUE;
1886                 ipv6_mask = item->mask;
1887                 if (ipv6_mask->hdr.vtc_flow ||
1888                     ipv6_mask->hdr.payload_len ||
1889                     ipv6_mask->hdr.proto ||
1890                     ipv6_mask->hdr.hop_limits) {
1891                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1892                         rte_flow_error_set(error, EINVAL,
1893                                 RTE_FLOW_ERROR_TYPE_ITEM,
1894                                 item, "Not supported by fdir filter");
1895                         return -rte_errno;
1896                 }
1897
1898                 /* check src addr mask */
1899                 for (j = 0; j < 16; j++) {
1900                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1901                                 rule->mask.src_ipv6_mask |= 1 << j;
1902                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1903                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1904                                 rte_flow_error_set(error, EINVAL,
1905                                         RTE_FLOW_ERROR_TYPE_ITEM,
1906                                         item, "Not supported by fdir filter");
1907                                 return -rte_errno;
1908                         }
1909                 }
1910
1911                 /* check dst addr mask */
1912                 for (j = 0; j < 16; j++) {
1913                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1914                                 rule->mask.dst_ipv6_mask |= 1 << j;
1915                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1916                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1917                                 rte_flow_error_set(error, EINVAL,
1918                                         RTE_FLOW_ERROR_TYPE_ITEM,
1919                                         item, "Not supported by fdir filter");
1920                                 return -rte_errno;
1921                         }
1922                 }
1923
1924                 if (item->spec) {
1925                         rule->b_spec = TRUE;
1926                         ipv6_spec = item->spec;
1927                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1928                                    ipv6_spec->hdr.src_addr, 16);
1929                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1930                                    ipv6_spec->hdr.dst_addr, 16);
1931                 }
1932
1933                 /**
1934                  * Check if the next not void item is
1935                  * TCP or UDP or SCTP or END.
1936                  */
1937                 item = next_no_fuzzy_pattern(pattern, item);
1938                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1939                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1940                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1941                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1942                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1943                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1944                         rte_flow_error_set(error, EINVAL,
1945                                 RTE_FLOW_ERROR_TYPE_ITEM,
1946                                 item, "Not supported by fdir filter");
1947                         return -rte_errno;
1948                 }
1949         }
1950
1951         /* Get the TCP info. */
1952         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1953                 /**
1954                  * Set the flow type even if there's no content
1955                  * as we must have a flow type.
1956                  */
1957                 rule->ixgbe_fdir.formatted.flow_type |=
1958                         IXGBE_ATR_L4TYPE_TCP;
1959                 /* Not supported last point for range */
1960                 if (item->last) {
1961                         rte_flow_error_set(error, EINVAL,
1962                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1963                                 item, "Not supported last point for range");
1964                         return -rte_errno;
1965                 }
1966                 /**
1967                  * Only care about src & dst ports,
1968                  * others should be masked.
1969                  */
1970                 if (!item->mask) {
1971                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1972                         rte_flow_error_set(error, EINVAL,
1973                                 RTE_FLOW_ERROR_TYPE_ITEM,
1974                                 item, "Not supported by fdir filter");
1975                         return -rte_errno;
1976                 }
1977                 rule->b_mask = TRUE;
1978                 tcp_mask = item->mask;
1979                 if (tcp_mask->hdr.sent_seq ||
1980                     tcp_mask->hdr.recv_ack ||
1981                     tcp_mask->hdr.data_off ||
1982                     tcp_mask->hdr.tcp_flags ||
1983                     tcp_mask->hdr.rx_win ||
1984                     tcp_mask->hdr.cksum ||
1985                     tcp_mask->hdr.tcp_urp) {
1986                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1987                         rte_flow_error_set(error, EINVAL,
1988                                 RTE_FLOW_ERROR_TYPE_ITEM,
1989                                 item, "Not supported by fdir filter");
1990                         return -rte_errno;
1991                 }
1992                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1993                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1994
1995                 if (item->spec) {
1996                         rule->b_spec = TRUE;
1997                         tcp_spec = item->spec;
1998                         rule->ixgbe_fdir.formatted.src_port =
1999                                 tcp_spec->hdr.src_port;
2000                         rule->ixgbe_fdir.formatted.dst_port =
2001                                 tcp_spec->hdr.dst_port;
2002                 }
2003
2004                 item = next_no_fuzzy_pattern(pattern, item);
2005                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2006                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2007                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2008                         rte_flow_error_set(error, EINVAL,
2009                                 RTE_FLOW_ERROR_TYPE_ITEM,
2010                                 item, "Not supported by fdir filter");
2011                         return -rte_errno;
2012                 }
2013
2014         }
2015
2016         /* Get the UDP info */
2017         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2018                 /**
2019                  * Set the flow type even if there's no content
2020                  * as we must have a flow type.
2021                  */
2022                 rule->ixgbe_fdir.formatted.flow_type |=
2023                         IXGBE_ATR_L4TYPE_UDP;
2024                 /* Not supported last point for range */
2025                 if (item->last) {
2026                         rte_flow_error_set(error, EINVAL,
2027                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2028                                 item, "Not supported last point for range");
2029                         return -rte_errno;
2030                 }
2031                 /**
2032                  * Only care about src & dst ports,
2033                  * others should be masked.
2034                  */
2035                 if (!item->mask) {
2036                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2037                         rte_flow_error_set(error, EINVAL,
2038                                 RTE_FLOW_ERROR_TYPE_ITEM,
2039                                 item, "Not supported by fdir filter");
2040                         return -rte_errno;
2041                 }
2042                 rule->b_mask = TRUE;
2043                 udp_mask = item->mask;
2044                 if (udp_mask->hdr.dgram_len ||
2045                     udp_mask->hdr.dgram_cksum) {
2046                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2047                         rte_flow_error_set(error, EINVAL,
2048                                 RTE_FLOW_ERROR_TYPE_ITEM,
2049                                 item, "Not supported by fdir filter");
2050                         return -rte_errno;
2051                 }
2052                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2053                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2054
2055                 if (item->spec) {
2056                         rule->b_spec = TRUE;
2057                         udp_spec = item->spec;
2058                         rule->ixgbe_fdir.formatted.src_port =
2059                                 udp_spec->hdr.src_port;
2060                         rule->ixgbe_fdir.formatted.dst_port =
2061                                 udp_spec->hdr.dst_port;
2062                 }
2063
2064                 item = next_no_fuzzy_pattern(pattern, item);
2065                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2066                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2067                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2068                         rte_flow_error_set(error, EINVAL,
2069                                 RTE_FLOW_ERROR_TYPE_ITEM,
2070                                 item, "Not supported by fdir filter");
2071                         return -rte_errno;
2072                 }
2073
2074         }
2075
2076         /* Get the SCTP info */
2077         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2078                 /**
2079                  * Set the flow type even if there's no content
2080                  * as we must have a flow type.
2081                  */
2082                 rule->ixgbe_fdir.formatted.flow_type |=
2083                         IXGBE_ATR_L4TYPE_SCTP;
2084                 /* Not supported last point for range */
2085                 if (item->last) {
2086                         rte_flow_error_set(error, EINVAL,
2087                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2088                                 item, "Not supported last point for range");
2089                         return -rte_errno;
2090                 }
2091
2092                 /* Only the x550 family supports the SCTP port. */
2093                 if (hw->mac.type == ixgbe_mac_X550 ||
2094                     hw->mac.type == ixgbe_mac_X550EM_x ||
2095                     hw->mac.type == ixgbe_mac_X550EM_a) {
2096                         /**
2097                          * Only care about src & dst ports,
2098                          * others should be masked.
2099                          */
2100                         if (!item->mask) {
2101                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2102                                 rte_flow_error_set(error, EINVAL,
2103                                         RTE_FLOW_ERROR_TYPE_ITEM,
2104                                         item, "Not supported by fdir filter");
2105                                 return -rte_errno;
2106                         }
2107                         rule->b_mask = TRUE;
2108                         sctp_mask = item->mask;
2109                         if (sctp_mask->hdr.tag ||
2110                                 sctp_mask->hdr.cksum) {
2111                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2112                                 rte_flow_error_set(error, EINVAL,
2113                                         RTE_FLOW_ERROR_TYPE_ITEM,
2114                                         item, "Not supported by fdir filter");
2115                                 return -rte_errno;
2116                         }
2117                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2118                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2119
2120                         if (item->spec) {
2121                                 rule->b_spec = TRUE;
2122                                 sctp_spec = item->spec;
2123                                 rule->ixgbe_fdir.formatted.src_port =
2124                                         sctp_spec->hdr.src_port;
2125                                 rule->ixgbe_fdir.formatted.dst_port =
2126                                         sctp_spec->hdr.dst_port;
2127                         }
2128                 /* On other MACs, even the SCTP port is not supported. */
2129                 } else {
2130                         sctp_mask = item->mask;
2131                         if (sctp_mask &&
2132                                 (sctp_mask->hdr.src_port ||
2133                                  sctp_mask->hdr.dst_port ||
2134                                  sctp_mask->hdr.tag ||
2135                                  sctp_mask->hdr.cksum)) {
2136                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2137                                 rte_flow_error_set(error, EINVAL,
2138                                         RTE_FLOW_ERROR_TYPE_ITEM,
2139                                         item, "Not supported by fdir filter");
2140                                 return -rte_errno;
2141                         }
2142                 }
2143
2144                 item = next_no_fuzzy_pattern(pattern, item);
2145                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2146                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2147                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2148                         rte_flow_error_set(error, EINVAL,
2149                                 RTE_FLOW_ERROR_TYPE_ITEM,
2150                                 item, "Not supported by fdir filter");
2151                         return -rte_errno;
2152                 }
2153         }
2154
2155         /* Get the flex byte info */
2156         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2157                 /* Not supported last point for range */
2158                 if (item->last) {
2159                         rte_flow_error_set(error, EINVAL,
2160                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2161                                 item, "Not supported last point for range");
2162                         return -rte_errno;
2163                 }
2164                 /* mask should not be null */
2165                 if (!item->mask || !item->spec) {
2166                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2167                         rte_flow_error_set(error, EINVAL,
2168                                 RTE_FLOW_ERROR_TYPE_ITEM,
2169                                 item, "Not supported by fdir filter");
2170                         return -rte_errno;
2171                 }
2172
2173                 raw_mask = item->mask;
2174
2175                 /* check mask */
2176                 if (raw_mask->relative != 0x1 ||
2177                     raw_mask->search != 0x1 ||
2178                     raw_mask->reserved != 0x0 ||
2179                     (uint32_t)raw_mask->offset != 0xffffffff ||
2180                     raw_mask->limit != 0xffff ||
2181                     raw_mask->length != 0xffff) {
2182                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2183                         rte_flow_error_set(error, EINVAL,
2184                                 RTE_FLOW_ERROR_TYPE_ITEM,
2185                                 item, "Not supported by fdir filter");
2186                         return -rte_errno;
2187                 }
2188
2189                 raw_spec = item->spec;
2190
2191                 /* check spec */
2192                 if (raw_spec->relative != 0 ||
2193                     raw_spec->search != 0 ||
2194                     raw_spec->reserved != 0 ||
2195                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2196                     raw_spec->offset % 2 ||
2197                     raw_spec->limit != 0 ||
2198                     raw_spec->length != 2 ||
2199                     /* pattern can't be 0xffff */
2200                     (raw_spec->pattern[0] == 0xff &&
2201                      raw_spec->pattern[1] == 0xff)) {
2202                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2203                         rte_flow_error_set(error, EINVAL,
2204                                 RTE_FLOW_ERROR_TYPE_ITEM,
2205                                 item, "Not supported by fdir filter");
2206                         return -rte_errno;
2207                 }
2208
2209                 /* check pattern mask */
2210                 if (raw_mask->pattern[0] != 0xff ||
2211                     raw_mask->pattern[1] != 0xff) {
2212                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2213                         rte_flow_error_set(error, EINVAL,
2214                                 RTE_FLOW_ERROR_TYPE_ITEM,
2215                                 item, "Not supported by fdir filter");
2216                         return -rte_errno;
2217                 }
2218
2219                 rule->mask.flex_bytes_mask = 0xffff;
2220                 rule->ixgbe_fdir.formatted.flex_bytes =
2221                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2222                         raw_spec->pattern[0];
2223                 rule->flex_bytes_offset = raw_spec->offset;
2224         }
2225
2226         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2227                 /* check if the next not void item is END */
2228                 item = next_no_fuzzy_pattern(pattern, item);
2229                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2230                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2231                         rte_flow_error_set(error, EINVAL,
2232                                 RTE_FLOW_ERROR_TYPE_ITEM,
2233                                 item, "Not supported by fdir filter");
2234                         return -rte_errno;
2235                 }
2236         }
2237
2238         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2239 }
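
/*
 * Illustrative application-side sketch (not part of the driver) of a
 * perfect-match IPv4/UDP pattern of the shape parsed by
 * ixgbe_parse_fdir_filter_normal() above: ETH (empty) / IPV4 / UDP / END
 * with fully-masked addresses and ports. The addresses 192.168.1.20 and
 * 192.167.3.50 and port 80 are arbitrary example values written in
 * big-endian form with the RTE_BE32()/RTE_BE16() helpers; fields left at
 * zero are treated as "don't care".
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = RTE_BE32(0xC0A80114),
 *		.hdr.dst_addr = RTE_BE32(0xC0A70332),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = RTE_BE32(0xFFFFFFFF),
 *		.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = RTE_BE16(80),
 *		.hdr.dst_port = RTE_BE16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = RTE_BE16(0xFFFF),
 *		.hdr.dst_port = RTE_BE16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */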
2240
2241 #define NVGRE_PROTOCOL 0x6558
2242
2243 /**
2244  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2245  * The flow director filter info is filled in along the way.
2246  * VxLAN PATTERN:
2247  * The first not void item must be ETH.
2248  * The second not void item must be IPV4 or IPV6.
2249  * The third not void item must be UDP, followed by VXLAN.
2250  * The remaining items are as in the VxLAN pattern example below.
2251  * NVGRE PATTERN:
2252  * The first not void item must be ETH.
2253  * The second not void item must be IPV4 or IPV6.
2254  * The third not void item must be NVGRE.
2255  * The remaining items are as in the NVGRE pattern example below.
2256  * ACTION:
2257  * The first not void action should be QUEUE or DROP.
2258  * The second not void optional action should be MARK,
2259  * mark_id is a uint32_t number.
2260  * The next not void action should be END.
2261  * VxLAN pattern example:
2262  * ITEM         Spec                    Mask
2263  * ETH          NULL                    NULL
2264  * IPV4/IPV6    NULL                    NULL
2265  * UDP          NULL                    NULL
2266  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2267  * MAC VLAN     tci     0x2016          0xEFFF
2268  * END
2269  * NVGRE pattern example:
2270  * ITEM         Spec                    Mask
2271  * ETH          NULL                    NULL
2272  * IPV4/IPV6    NULL                    NULL
2273  * NVGRE        protocol        0x6558  0xFFFF
2274  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2275  * MAC VLAN     tci     0x2016          0xEFFF
2276  * END
2277  * Other members in mask and spec should be set to 0x00.
2278  * item->last should be NULL.
2279  */
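/*
 * Illustrative sketch (not part of the driver) of the VXLAN item from the
 * VxLAN pattern example above, expressed as an rte_flow item with a fully
 * masked VNI. The VNI bytes are the same arbitrary example values used in
 * the table above; the surrounding ETH/IPV4/UDP items and the inner MAC
 * VLAN item are built the same way with their own spec/mask structures.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item vxlan_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &vxlan_spec,
 *		.mask = &vxlan_mask,
 *	};
 */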
2280 static int
2281 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2282                                const struct rte_flow_item pattern[],
2283                                const struct rte_flow_action actions[],
2284                                struct ixgbe_fdir_rule *rule,
2285                                struct rte_flow_error *error)
2286 {
2287         const struct rte_flow_item *item;
2288         const struct rte_flow_item_vxlan *vxlan_spec;
2289         const struct rte_flow_item_vxlan *vxlan_mask;
2290         const struct rte_flow_item_nvgre *nvgre_spec;
2291         const struct rte_flow_item_nvgre *nvgre_mask;
2292         const struct rte_flow_item_eth *eth_spec;
2293         const struct rte_flow_item_eth *eth_mask;
2294         const struct rte_flow_item_vlan *vlan_spec;
2295         const struct rte_flow_item_vlan *vlan_mask;
2296         uint32_t j;
2297
2298         if (!pattern) {
2299                 rte_flow_error_set(error, EINVAL,
2300                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2301                                    NULL, "NULL pattern.");
2302                 return -rte_errno;
2303         }
2304
2305         if (!actions) {
2306                 rte_flow_error_set(error, EINVAL,
2307                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2308                                    NULL, "NULL action.");
2309                 return -rte_errno;
2310         }
2311
2312         if (!attr) {
2313                 rte_flow_error_set(error, EINVAL,
2314                                    RTE_FLOW_ERROR_TYPE_ATTR,
2315                                    NULL, "NULL attribute.");
2316                 return -rte_errno;
2317         }
2318
2319         /**
2320          * Some fields may not be provided. Set spec to 0 and mask to default
2321          * value. So, we need not do anything for the not provided fields later.
2322          */
2323         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2324         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2325         rule->mask.vlan_tci_mask = 0;
2326
2327         /**
2328          * The first not void item should be
2329          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2330          */
2331         item = next_no_void_pattern(pattern, NULL);
2332         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2333             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2334             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2335             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2336             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2337             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2338                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2339                 rte_flow_error_set(error, EINVAL,
2340                         RTE_FLOW_ERROR_TYPE_ITEM,
2341                         item, "Not supported by fdir filter");
2342                 return -rte_errno;
2343         }
2344
2345         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2346
2347         /* Skip MAC. */
2348         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2349                 /* Only used to describe the protocol stack. */
2350                 if (item->spec || item->mask) {
2351                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2352                         rte_flow_error_set(error, EINVAL,
2353                                 RTE_FLOW_ERROR_TYPE_ITEM,
2354                                 item, "Not supported by fdir filter");
2355                         return -rte_errno;
2356                 }
2357                 /* Not supported last point for range */
2358                 if (item->last) {
2359                         rte_flow_error_set(error, EINVAL,
2360                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2361                                 item, "Not supported last point for range");
2362                         return -rte_errno;
2363                 }
2364
2365                 /* Check if the next not void item is IPv4 or IPv6. */
2366                 item = next_no_void_pattern(pattern, item);
2367                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2368                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2369                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2370                         rte_flow_error_set(error, EINVAL,
2371                                 RTE_FLOW_ERROR_TYPE_ITEM,
2372                                 item, "Not supported by fdir filter");
2373                         return -rte_errno;
2374                 }
2375         }
2376
2377         /* Skip IP. */
2378         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2379             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2380                 /* Only used to describe the protocol stack. */
2381                 if (item->spec || item->mask) {
2382                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2383                         rte_flow_error_set(error, EINVAL,
2384                                 RTE_FLOW_ERROR_TYPE_ITEM,
2385                                 item, "Not supported by fdir filter");
2386                         return -rte_errno;
2387                 }
2388                 /* Not supported last point for range */
2389                 if (item->last) {
2390                         rte_flow_error_set(error, EINVAL,
2391                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2392                                 item, "Not supported last point for range");
2393                         return -rte_errno;
2394                 }
2395
2396                 /* Check if the next not void item is UDP or NVGRE. */
2397                 item = next_no_void_pattern(pattern, item);
2398                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2399                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2400                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2401                         rte_flow_error_set(error, EINVAL,
2402                                 RTE_FLOW_ERROR_TYPE_ITEM,
2403                                 item, "Not supported by fdir filter");
2404                         return -rte_errno;
2405                 }
2406         }
2407
2408         /* Skip UDP. */
2409         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2410                 /* Only used to describe the protocol stack. */
2411                 if (item->spec || item->mask) {
2412                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2413                         rte_flow_error_set(error, EINVAL,
2414                                 RTE_FLOW_ERROR_TYPE_ITEM,
2415                                 item, "Not supported by fdir filter");
2416                         return -rte_errno;
2417                 }
2418                 /* Not supported last point for range */
2419                 if (item->last) {
2420                         rte_flow_error_set(error, EINVAL,
2421                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2422                                 item, "Not supported last point for range");
2423                         return -rte_errno;
2424                 }
2425
2426                 /* Check if the next not void item is VxLAN. */
2427                 item = next_no_void_pattern(pattern, item);
2428                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2429                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2430                         rte_flow_error_set(error, EINVAL,
2431                                 RTE_FLOW_ERROR_TYPE_ITEM,
2432                                 item, "Not supported by fdir filter");
2433                         return -rte_errno;
2434                 }
2435         }
2436
2437         /* Get the VxLAN info */
2438         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2439                 rule->ixgbe_fdir.formatted.tunnel_type =
2440                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2441
2442                 /* Only care about VNI, others should be masked. */
2443                 if (!item->mask) {
2444                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2445                         rte_flow_error_set(error, EINVAL,
2446                                 RTE_FLOW_ERROR_TYPE_ITEM,
2447                                 item, "Not supported by fdir filter");
2448                         return -rte_errno;
2449                 }
2450                 /* Not supported last point for range */
2451                 if (item->last) {
2452                         rte_flow_error_set(error, EINVAL,
2453                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2454                                 item, "Not supported last point for range");
2455                         return -rte_errno;
2456                 }
2457                 rule->b_mask = TRUE;
2458
2459                 /* Tunnel type is always meaningful. */
2460                 rule->mask.tunnel_type_mask = 1;
2461
2462                 vxlan_mask = item->mask;
2463                 if (vxlan_mask->flags) {
2464                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2465                         rte_flow_error_set(error, EINVAL,
2466                                 RTE_FLOW_ERROR_TYPE_ITEM,
2467                                 item, "Not supported by fdir filter");
2468                         return -rte_errno;
2469                 }
2470                 /* VNI must be totally masked or not masked at all. */
2471                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2472                         vxlan_mask->vni[2]) &&
2473                         ((vxlan_mask->vni[0] != 0xFF) ||
2474                         (vxlan_mask->vni[1] != 0xFF) ||
2475                                 (vxlan_mask->vni[2] != 0xFF))) {
2476                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2477                         rte_flow_error_set(error, EINVAL,
2478                                 RTE_FLOW_ERROR_TYPE_ITEM,
2479                                 item, "Not supported by fdir filter");
2480                         return -rte_errno;
2481                 }
2482
2483                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2484                         RTE_DIM(vxlan_mask->vni));
2485
2486                 if (item->spec) {
2487                         rule->b_spec = TRUE;
2488                         vxlan_spec = item->spec;
2489                         rte_memcpy(((uint8_t *)
2490                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2491                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2492                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2493                                 rule->ixgbe_fdir.formatted.tni_vni);
2494                 }
2495         }
2496
2497         /* Get the NVGRE info */
2498         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2499                 rule->ixgbe_fdir.formatted.tunnel_type =
2500                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2501
2502                 /**
2503                  * Only c_k_s_rsvd0_ver, protocol and TNI are relevant;
2504                  * everything else must be masked out.
2505                  */
2506                 if (!item->mask) {
2507                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2508                         rte_flow_error_set(error, EINVAL,
2509                                 RTE_FLOW_ERROR_TYPE_ITEM,
2510                                 item, "Not supported by fdir filter");
2511                         return -rte_errno;
2512                 }
2513                 /* Range matching ("last") is not supported. */
2514                 if (item->last) {
2515                         rte_flow_error_set(error, EINVAL,
2516                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2517                                 item, "Not supported last point for range");
2518                         return -rte_errno;
2519                 }
2520                 rule->b_mask = TRUE;
2521
2522                 /* Tunnel type is always meaningful. */
2523                 rule->mask.tunnel_type_mask = 1;
2524
2525                 nvgre_mask = item->mask;
2526                 if (nvgre_mask->flow_id) {
2527                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2528                         rte_flow_error_set(error, EINVAL,
2529                                 RTE_FLOW_ERROR_TYPE_ITEM,
2530                                 item, "Not supported by fdir filter");
2531                         return -rte_errno;
2532                 }
2533                 if (nvgre_mask->protocol &&
2534                     nvgre_mask->protocol != 0xFFFF) {
2535                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2536                         rte_flow_error_set(error, EINVAL,
2537                                 RTE_FLOW_ERROR_TYPE_ITEM,
2538                                 item, "Not supported by fdir filter");
2539                         return -rte_errno;
2540                 }
2541                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2542                     nvgre_mask->c_k_s_rsvd0_ver !=
2543                         rte_cpu_to_be_16(0xFFFF)) {
2544                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2545                         rte_flow_error_set(error, EINVAL,
2546                                 RTE_FLOW_ERROR_TYPE_ITEM,
2547                                 item, "Not supported by fdir filter");
2548                         return -rte_errno;
2549                 }
2550                 /* TNI must be totally masked or not. */
2551                 if (nvgre_mask->tni[0] &&
2552                     ((nvgre_mask->tni[0] != 0xFF) ||
2553                     (nvgre_mask->tni[1] != 0xFF) ||
2554                     (nvgre_mask->tni[2] != 0xFF))) {
2555                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2556                         rte_flow_error_set(error, EINVAL,
2557                                 RTE_FLOW_ERROR_TYPE_ITEM,
2558                                 item, "Not supported by fdir filter");
2559                         return -rte_errno;
2560                 }
2561                 /* TNI is a 24-bit field. */
2562                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2563                         RTE_DIM(nvgre_mask->tni));
2564                 rule->mask.tunnel_id_mask <<= 8;
2565
2566                 if (item->spec) {
2567                         rule->b_spec = TRUE;
2568                         nvgre_spec = item->spec;
2569                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2570                             rte_cpu_to_be_16(0x2000) &&
2571                                 nvgre_mask->c_k_s_rsvd0_ver) {
2572                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2573                                 rte_flow_error_set(error, EINVAL,
2574                                         RTE_FLOW_ERROR_TYPE_ITEM,
2575                                         item, "Not supported by fdir filter");
2576                                 return -rte_errno;
2577                         }
2578                         if (nvgre_mask->protocol &&
2579                             nvgre_spec->protocol !=
2580                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2581                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2582                                 rte_flow_error_set(error, EINVAL,
2583                                         RTE_FLOW_ERROR_TYPE_ITEM,
2584                                         item, "Not supported by fdir filter");
2585                                 return -rte_errno;
2586                         }
2587                         /* TNI is a 24-bit field. */
2588                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2589                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2590                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2591                 }
2592         }
2593
2594         /* check if the next not void item is MAC */
2595         item = next_no_void_pattern(pattern, item);
2596         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2597                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2598                 rte_flow_error_set(error, EINVAL,
2599                         RTE_FLOW_ERROR_TYPE_ITEM,
2600                         item, "Not supported by fdir filter");
2601                 return -rte_errno;
2602         }
2603
2604         /**
2605          * Only VLAN and dst MAC address are supported;
2606          * everything else must be masked out.
2607          */
2608
2609         if (!item->mask) {
2610                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2611                 rte_flow_error_set(error, EINVAL,
2612                         RTE_FLOW_ERROR_TYPE_ITEM,
2613                         item, "Not supported by fdir filter");
2614                 return -rte_errno;
2615         }
2616         /* Range matching ("last") is not supported. */
2617         if (item->last) {
2618                 rte_flow_error_set(error, EINVAL,
2619                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2620                         item, "Not supported last point for range");
2621                 return -rte_errno;
2622         }
2623         rule->b_mask = TRUE;
2624         eth_mask = item->mask;
2625
2626         /* Matching on Ether type is not supported; its mask must be 0. */
2627         if (eth_mask->type) {
2628                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2629                 rte_flow_error_set(error, EINVAL,
2630                         RTE_FLOW_ERROR_TYPE_ITEM,
2631                         item, "Not supported by fdir filter");
2632                 return -rte_errno;
2633         }
2634
2635         /* Matching on src MAC is not supported; its mask must be 0. */
2636         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2637                 if (eth_mask->src.addr_bytes[j]) {
2638                         memset(rule, 0,
2639                                sizeof(struct ixgbe_fdir_rule));
2640                         rte_flow_error_set(error, EINVAL,
2641                                 RTE_FLOW_ERROR_TYPE_ITEM,
2642                                 item, "Not supported by fdir filter");
2643                         return -rte_errno;
2644                 }
2645         }
2646         rule->mask.mac_addr_byte_mask = 0;
2647         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2648                 /* It's a per byte mask. */
2649                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2650                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2651                 } else if (eth_mask->dst.addr_bytes[j]) {
2652                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2653                         rte_flow_error_set(error, EINVAL,
2654                                 RTE_FLOW_ERROR_TYPE_ITEM,
2655                                 item, "Not supported by fdir filter");
2656                         return -rte_errno;
2657                 }
2658         }
2659
2660         /* When there is no VLAN item, treat the TCI as fully masked. */
2661         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2662
2663         if (item->spec) {
2664                 rule->b_spec = TRUE;
2665                 eth_spec = item->spec;
2666
2667                 /* Get the dst MAC. */
2668                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2669                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2670                                 eth_spec->dst.addr_bytes[j];
2671                 }
2672         }
2673
2674         /**
2675          * Check if the next not void item is vlan or ipv4.
2676          * IPv6 is not supported.
2677          */
2678         item = next_no_void_pattern(pattern, item);
2679         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2680                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2681                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2682                 rte_flow_error_set(error, EINVAL,
2683                         RTE_FLOW_ERROR_TYPE_ITEM,
2684                         item, "Not supported by fdir filter");
2685                 return -rte_errno;
2686         }
2687         /* Range matching ("last") is not supported. */
2688         if (item->last) {
2689                 rte_flow_error_set(error, EINVAL,
2690                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2691                         item, "Not supported last point for range");
2692                 return -rte_errno;
2693         }
2694
2695         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2696                 if (!(item->spec && item->mask)) {
2697                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2698                         rte_flow_error_set(error, EINVAL,
2699                                 RTE_FLOW_ERROR_TYPE_ITEM,
2700                                 item, "Not supported by fdir filter");
2701                         return -rte_errno;
2702                 }
2703
2704                 vlan_spec = item->spec;
2705                 vlan_mask = item->mask;
2706
2707                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2708
2709                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2710                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2711                 /* More than one VLAN tag is not supported. */
2712
2713                 /* check if the next not void item is END */
2714                 item = next_no_void_pattern(pattern, item);
2715
2716                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2717                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2718                         rte_flow_error_set(error, EINVAL,
2719                                 RTE_FLOW_ERROR_TYPE_ITEM,
2720                                 item, "Not supported by fdir filter");
2721                         return -rte_errno;
2722                 }
2723         }
2724
2725         /**
2726          * If the VLAN TCI mask is 0, the VLAN is a don't-care.
2727          * Do nothing.
2728          */
2729
2730         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2731 }
2732
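/*
 * Top-level FDIR parser: try the normal (non-tunnel) parser first, then
 * fall back to the tunnel (VXLAN/NVGRE) parser. The checks after parsing
 * reject drop rules that match L4 ports on 82599, rules whose mode does
 * not match the configured fdir_conf.mode, and queue indexes beyond
 * nb_rx_queues.
 */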
2733 static int
2734 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2735                         const struct rte_flow_attr *attr,
2736                         const struct rte_flow_item pattern[],
2737                         const struct rte_flow_action actions[],
2738                         struct ixgbe_fdir_rule *rule,
2739                         struct rte_flow_error *error)
2740 {
2741         int ret;
2742         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2743         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2744
2745         if (hw->mac.type != ixgbe_mac_82599EB &&
2746                 hw->mac.type != ixgbe_mac_X540 &&
2747                 hw->mac.type != ixgbe_mac_X550 &&
2748                 hw->mac.type != ixgbe_mac_X550EM_x &&
2749                 hw->mac.type != ixgbe_mac_X550EM_a)
2750                 return -ENOTSUP;
2751
2752         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2753                                         actions, rule, error);
2754
2755         if (!ret)
2756                 goto step_next;
2757
2758         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2759                                         actions, rule, error);
2760
2761         if (ret)
2762                 return ret;
2763
2764 step_next:
2765
2766         if (hw->mac.type == ixgbe_mac_82599EB &&
2767                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2768                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2769                 rule->ixgbe_fdir.formatted.dst_port != 0))
2770                 return -ENOTSUP;
2771
2772         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2773             fdir_mode != rule->mode)
2774                 return -ENOTSUP;
2775
2776         if (rule->queue >= dev->data->nb_rx_queues)
2777                 return -ENOTSUP;
2778
2779         return ret;
2780 }
2781
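/*
 * Parse an RSS flow: the action list must be a single RSS action followed
 * by END, with valid queue indexes, the default hash function, level 0 and
 * (if given) a 40-byte key; only the ingress attribute is accepted.
 *
 * A minimal sketch of an action list this parser accepts, assuming a port
 * configured with two Rx queues (illustrative values only):
 *
 *      uint16_t queues[] = { 0, 1 };
 *      struct rte_flow_action_rss rss = {
 *              .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *              .level = 0,
 *              .types = ETH_RSS_IP,
 *              .queue_num = RTE_DIM(queues),
 *              .queue = queues,
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */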
2782 static int
2783 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2784                         const struct rte_flow_attr *attr,
2785                         const struct rte_flow_action actions[],
2786                         struct ixgbe_rte_flow_rss_conf *rss_conf,
2787                         struct rte_flow_error *error)
2788 {
2789         const struct rte_flow_action *act;
2790         const struct rte_flow_action_rss *rss;
2791         uint16_t n;
2792
2793         /**
2794          * RSS only supports forwarding;
2795          * check that the first not-void action is RSS.
2796          */
2797         act = next_no_void_action(actions, NULL);
2798         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2799                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2800                 rte_flow_error_set(error, EINVAL,
2801                         RTE_FLOW_ERROR_TYPE_ACTION,
2802                         act, "Not supported action.");
2803                 return -rte_errno;
2804         }
2805
2806         rss = (const struct rte_flow_action_rss *)act->conf;
2807
2808         if (!rss || !rss->queue_num) {
2809                 rte_flow_error_set(error, EINVAL,
2810                                 RTE_FLOW_ERROR_TYPE_ACTION,
2811                                 act,
2812                            "no valid queues");
2813                 return -rte_errno;
2814         }
2815
2816         for (n = 0; n < rss->queue_num; n++) {
2817                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2818                         rte_flow_error_set(error, EINVAL,
2819                                    RTE_FLOW_ERROR_TYPE_ACTION,
2820                                    act,
2821                                    "queue id > max number of queues");
2822                         return -rte_errno;
2823                 }
2824         }
2825
2826         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2827                 return rte_flow_error_set
2828                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2829                          "non-default RSS hash functions are not supported");
2830         if (rss->level)
2831                 return rte_flow_error_set
2832                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2833                          "a nonzero RSS encapsulation level is not supported");
2834         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2835                 return rte_flow_error_set
2836                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2837                          "RSS hash key must be exactly 40 bytes");
2838         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2839                 return rte_flow_error_set
2840                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2841                          "too many queues for RSS context");
2842         if (ixgbe_rss_conf_init(rss_conf, rss))
2843                 return rte_flow_error_set
2844                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2845                          "RSS context initialization failure");
2846
2847         /* check if the next not-void action is END */
2848         act = next_no_void_action(actions, act);
2849         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2850                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2851                 rte_flow_error_set(error, EINVAL,
2852                         RTE_FLOW_ERROR_TYPE_ACTION,
2853                         act, "Not supported action.");
2854                 return -rte_errno;
2855         }
2856
2857         /* parse attr */
2858         /* must be input direction */
2859         if (!attr->ingress) {
2860                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2861                 rte_flow_error_set(error, EINVAL,
2862                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2863                                    attr, "Only support ingress.");
2864                 return -rte_errno;
2865         }
2866
2867         /* not supported */
2868         if (attr->egress) {
2869                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2870                 rte_flow_error_set(error, EINVAL,
2871                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2872                                    attr, "Not support egress.");
2873                 return -rte_errno;
2874         }
2875
2876         /* not supported */
2877         if (attr->transfer) {
2878                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2879                 rte_flow_error_set(error, EINVAL,
2880                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2881                                    attr, "No support for transfer.");
2882                 return -rte_errno;
2883         }
2884
2885         if (attr->priority > 0xFFFF) {
2886                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2887                 rte_flow_error_set(error, EINVAL,
2888                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2889                                    attr, "Error priority.");
2890                 return -rte_errno;
2891         }
2892
2893         return 0;
2894 }
2895
2896 /* remove the rss filter */
2897 static void
2898 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2899 {
2900         struct ixgbe_filter_info *filter_info =
2901                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2902
2903         if (filter_info->rss_info.conf.queue_num)
2904                 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2905 }
2906
2907 void
2908 ixgbe_filterlist_init(void)
2909 {
2910         TAILQ_INIT(&filter_ntuple_list);
2911         TAILQ_INIT(&filter_ethertype_list);
2912         TAILQ_INIT(&filter_syn_list);
2913         TAILQ_INIT(&filter_fdir_list);
2914         TAILQ_INIT(&filter_l2_tunnel_list);
2915         TAILQ_INIT(&filter_rss_list);
2916         TAILQ_INIT(&ixgbe_flow_list);
2917 }
2918
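/*
 * Free every entry on the software filter lists and the rte_flow wrappers
 * tracked in ixgbe_flow_list. Hardware filters are not touched here; see
 * ixgbe_flow_flush() for that.
 */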
2919 void
2920 ixgbe_filterlist_flush(void)
2921 {
2922         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2923         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2924         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2925         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2926         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2927         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2928         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2929
2930         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2931                 TAILQ_REMOVE(&filter_ntuple_list,
2932                                  ntuple_filter_ptr,
2933                                  entries);
2934                 rte_free(ntuple_filter_ptr);
2935         }
2936
2937         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2938                 TAILQ_REMOVE(&filter_ethertype_list,
2939                                  ethertype_filter_ptr,
2940                                  entries);
2941                 rte_free(ethertype_filter_ptr);
2942         }
2943
2944         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2945                 TAILQ_REMOVE(&filter_syn_list,
2946                                  syn_filter_ptr,
2947                                  entries);
2948                 rte_free(syn_filter_ptr);
2949         }
2950
2951         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2952                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2953                                  l2_tn_filter_ptr,
2954                                  entries);
2955                 rte_free(l2_tn_filter_ptr);
2956         }
2957
2958         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2959                 TAILQ_REMOVE(&filter_fdir_list,
2960                                  fdir_rule_ptr,
2961                                  entries);
2962                 rte_free(fdir_rule_ptr);
2963         }
2964
2965         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2966                 TAILQ_REMOVE(&filter_rss_list,
2967                                  rss_filter_ptr,
2968                                  entries);
2969                 rte_free(rss_filter_ptr);
2970         }
2971
2972         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2973                 TAILQ_REMOVE(&ixgbe_flow_list,
2974                                  ixgbe_flow_mem_ptr,
2975                                  entries);
2976                 rte_free(ixgbe_flow_mem_ptr->flow);
2977                 rte_free(ixgbe_flow_mem_ptr);
2978         }
2979 }
2980
2981 /**
2982  * Create or destroy a flow rule.
2983  * Theoretically one rule can match more than one filter type.
2984  * The first filter type whose parser accepts the rule is used,
2985  * so the parsing sequence matters.
2986  */
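/*
 * Reached through the generic rte_flow API; a minimal usage sketch
 * (error handling shortened):
 *
 *      struct rte_flow_error err;
 *      struct rte_flow *f;
 *
 *      f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *      if (f == NULL)
 *              printf("flow creation failed: %s\n", err.message);
 */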
2987 static struct rte_flow *
2988 ixgbe_flow_create(struct rte_eth_dev *dev,
2989                   const struct rte_flow_attr *attr,
2990                   const struct rte_flow_item pattern[],
2991                   const struct rte_flow_action actions[],
2992                   struct rte_flow_error *error)
2993 {
2994         int ret;
2995         struct rte_eth_ntuple_filter ntuple_filter;
2996         struct rte_eth_ethertype_filter ethertype_filter;
2997         struct rte_eth_syn_filter syn_filter;
2998         struct ixgbe_fdir_rule fdir_rule;
2999         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3000         struct ixgbe_hw_fdir_info *fdir_info =
3001                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3002         struct ixgbe_rte_flow_rss_conf rss_conf;
3003         struct rte_flow *flow = NULL;
3004         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3005         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3006         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3007         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3008         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3009         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3010         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3011         uint8_t first_mask = FALSE;
3012
3013         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3014         if (!flow) {
3015                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3016                 return NULL;
3017         }
3018         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3019                         sizeof(struct ixgbe_flow_mem), 0);
3020         if (!ixgbe_flow_mem_ptr) {
3021                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3022                 rte_free(flow);
3023                 return NULL;
3024         }
3025         ixgbe_flow_mem_ptr->flow = flow;
3026         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3027                                 ixgbe_flow_mem_ptr, entries);
3028
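        /*
         * Try each filter type in turn; the order below (ntuple, ethertype,
         * SYN, FDIR, L2 tunnel, RSS) decides which filter a rule ends up in
         * when more than one parser would accept it.
         */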
3029         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3030         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3031                         actions, &ntuple_filter, error);
3032
3033 #ifdef RTE_LIBRTE_SECURITY
3034         /* An ESP flow is not really a flow; return the handle without adding a filter. */
3035         if (ntuple_filter.proto == IPPROTO_ESP)
3036                 return flow;
3037 #endif
3038
3039         if (!ret) {
3040                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3041                 if (!ret) {
3042                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3043                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3044                         if (!ntuple_filter_ptr) {
3045                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3046                                 goto out;
3047                         }
3048                         rte_memcpy(&ntuple_filter_ptr->filter_info,
3049                                 &ntuple_filter,
3050                                 sizeof(struct rte_eth_ntuple_filter));
3051                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
3052                                 ntuple_filter_ptr, entries);
3053                         flow->rule = ntuple_filter_ptr;
3054                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3055                         return flow;
3056                 }
3057                 goto out;
3058         }
3059
3060         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3061         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3062                                 actions, &ethertype_filter, error);
3063         if (!ret) {
3064                 ret = ixgbe_add_del_ethertype_filter(dev,
3065                                 &ethertype_filter, TRUE);
3066                 if (!ret) {
3067                         ethertype_filter_ptr = rte_zmalloc(
3068                                 "ixgbe_ethertype_filter",
3069                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3070                         if (!ethertype_filter_ptr) {
3071                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3072                                 goto out;
3073                         }
3074                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3075                                 &ethertype_filter,
3076                                 sizeof(struct rte_eth_ethertype_filter));
3077                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
3078                                 ethertype_filter_ptr, entries);
3079                         flow->rule = ethertype_filter_ptr;
3080                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3081                         return flow;
3082                 }
3083                 goto out;
3084         }
3085
3086         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3087         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3088                                 actions, &syn_filter, error);
3089         if (!ret) {
3090                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3091                 if (!ret) {
3092                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3093                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3094                         if (!syn_filter_ptr) {
3095                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3096                                 goto out;
3097                         }
3098                         rte_memcpy(&syn_filter_ptr->filter_info,
3099                                 &syn_filter,
3100                                 sizeof(struct rte_eth_syn_filter));
3101                         TAILQ_INSERT_TAIL(&filter_syn_list,
3102                                 syn_filter_ptr,
3103                                 entries);
3104                         flow->rule = syn_filter_ptr;
3105                         flow->filter_type = RTE_ETH_FILTER_SYN;
3106                         return flow;
3107                 }
3108                 goto out;
3109         }
3110
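        /*
         * FDIR rules share one global mask: the first rule programs the mask
         * (and the flex-bytes offset); every later rule must carry an
         * identical mask.
         */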
3111         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3112         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3113                                 actions, &fdir_rule, error);
3114         if (!ret) {
3115                 /* The FDIR mask is global and cannot be deleted per rule. */
3116                 if (fdir_rule.b_mask) {
3117                         if (!fdir_info->mask_added) {
3118                                 /* It's the first time the mask is set. */
3119                                 rte_memcpy(&fdir_info->mask,
3120                                         &fdir_rule.mask,
3121                                         sizeof(struct ixgbe_hw_fdir_mask));
3122                                 fdir_info->flex_bytes_offset =
3123                                         fdir_rule.flex_bytes_offset;
3124
3125                                 if (fdir_rule.mask.flex_bytes_mask)
3126                                         ixgbe_fdir_set_flexbytes_offset(dev,
3127                                                 fdir_rule.flex_bytes_offset);
3128
3129                                 ret = ixgbe_fdir_set_input_mask(dev);
3130                                 if (ret)
3131                                         goto out;
3132
3133                                 fdir_info->mask_added = TRUE;
3134                                 first_mask = TRUE;
3135                         } else {
3136                                 /**
3137                                  * Only one global mask is supported;
3138                                  * all rules must use the same mask.
3139                                  */
3140                                 ret = memcmp(&fdir_info->mask,
3141                                         &fdir_rule.mask,
3142                                         sizeof(struct ixgbe_hw_fdir_mask));
3143                                 if (ret)
3144                                         goto out;
3145
3146                                 if (fdir_info->flex_bytes_offset !=
3147                                                 fdir_rule.flex_bytes_offset)
3148                                         goto out;
3149                         }
3150                 }
3151
3152                 if (fdir_rule.b_spec) {
3153                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3154                                         FALSE, FALSE);
3155                         if (!ret) {
3156                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3157                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
3158                                 if (!fdir_rule_ptr) {
3159                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3160                                         goto out;
3161                                 }
3162                                 rte_memcpy(&fdir_rule_ptr->filter_info,
3163                                         &fdir_rule,
3164                                         sizeof(struct ixgbe_fdir_rule));
3165                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
3166                                         fdir_rule_ptr, entries);
3167                                 flow->rule = fdir_rule_ptr;
3168                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
3169
3170                                 return flow;
3171                         }
3172
3173                         if (ret) {
3174                                 /**
3175                                  * Clear the mask_added flag if programming
3176                                  * the rule fails.
3177                                  */
3178                                 if (first_mask)
3179                                         fdir_info->mask_added = FALSE;
3180                                 goto out;
3181                         }
3182                 }
3183
3184                 goto out;
3185         }
3186
3187         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3188         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3189                                         actions, &l2_tn_filter, error);
3190         if (!ret) {
3191                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3192                 if (!ret) {
3193                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3194                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3195                         if (!l2_tn_filter_ptr) {
3196                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3197                                 goto out;
3198                         }
3199                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3200                                 &l2_tn_filter,
3201                                 sizeof(struct rte_eth_l2_tunnel_conf));
3202                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3203                                 l2_tn_filter_ptr, entries);
3204                         flow->rule = l2_tn_filter_ptr;
3205                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3206                         return flow;
3207                 }
3208         }
3209
3210         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3211         ret = ixgbe_parse_rss_filter(dev, attr,
3212                                         actions, &rss_conf, error);
3213         if (!ret) {
3214                 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3215                 if (!ret) {
3216                         rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3217                                 sizeof(struct ixgbe_rss_conf_ele), 0);
3218                         if (!rss_filter_ptr) {
3219                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3220                                 goto out;
3221                         }
3222                         ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3223                                             &rss_conf.conf);
3224                         TAILQ_INSERT_TAIL(&filter_rss_list,
3225                                 rss_filter_ptr, entries);
3226                         flow->rule = rss_filter_ptr;
3227                         flow->filter_type = RTE_ETH_FILTER_HASH;
3228                         return flow;
3229                 }
3230         }
3231
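        /* Error path: undo the flow bookkeeping and report the failure. */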
3232 out:
3233         TAILQ_REMOVE(&ixgbe_flow_list,
3234                 ixgbe_flow_mem_ptr, entries);
3235         rte_flow_error_set(error, -ret,
3236                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3237                            "Failed to create flow.");
3238         rte_free(ixgbe_flow_mem_ptr);
3239         rte_free(flow);
3240         return NULL;
3241 }
3242
3243 /**
3244  * Check if the flow rule is supported by ixgbe.
3245  * It only checks the format; it does not guarantee that the rule can be
3246  * programmed into the HW, because there may not be enough room for it.
3247  */
3248 static int
3249 ixgbe_flow_validate(struct rte_eth_dev *dev,
3250                 const struct rte_flow_attr *attr,
3251                 const struct rte_flow_item pattern[],
3252                 const struct rte_flow_action actions[],
3253                 struct rte_flow_error *error)
3254 {
3255         struct rte_eth_ntuple_filter ntuple_filter;
3256         struct rte_eth_ethertype_filter ethertype_filter;
3257         struct rte_eth_syn_filter syn_filter;
3258         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3259         struct ixgbe_fdir_rule fdir_rule;
3260         struct ixgbe_rte_flow_rss_conf rss_conf;
3261         int ret;
3262
3263         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3264         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3265                                 actions, &ntuple_filter, error);
3266         if (!ret)
3267                 return 0;
3268
3269         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3270         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3271                                 actions, &ethertype_filter, error);
3272         if (!ret)
3273                 return 0;
3274
3275         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3276         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3277                                 actions, &syn_filter, error);
3278         if (!ret)
3279                 return 0;
3280
3281         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3282         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3283                                 actions, &fdir_rule, error);
3284         if (!ret)
3285                 return 0;
3286
3287         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3288         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3289                                 actions, &l2_tn_filter, error);
3290         if (!ret)
3291                 return 0;
3292
3293         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3294         ret = ixgbe_parse_rss_filter(dev, attr,
3295                                         actions, &rss_conf, error);
3296
3297         return ret;
3298 }
3299
3300 /* Destroy a flow rule on ixgbe. */
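/*
 * The flow handle records which filter type and software list entry back it:
 * remove the hardware filter first, then the list entry, and finally the
 * wrapper itself from ixgbe_flow_list.
 */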
3301 static int
3302 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3303                 struct rte_flow *flow,
3304                 struct rte_flow_error *error)
3305 {
3306         int ret;
3307         struct rte_flow *pmd_flow = flow;
3308         enum rte_filter_type filter_type = pmd_flow->filter_type;
3309         struct rte_eth_ntuple_filter ntuple_filter;
3310         struct rte_eth_ethertype_filter ethertype_filter;
3311         struct rte_eth_syn_filter syn_filter;
3312         struct ixgbe_fdir_rule fdir_rule;
3313         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3314         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3315         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3316         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3317         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3318         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3319         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3320         struct ixgbe_hw_fdir_info *fdir_info =
3321                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3322         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3323
3324         switch (filter_type) {
3325         case RTE_ETH_FILTER_NTUPLE:
3326                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3327                                         pmd_flow->rule;
3328                 rte_memcpy(&ntuple_filter,
3329                         &ntuple_filter_ptr->filter_info,
3330                         sizeof(struct rte_eth_ntuple_filter));
3331                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3332                 if (!ret) {
3333                         TAILQ_REMOVE(&filter_ntuple_list,
3334                         ntuple_filter_ptr, entries);
3335                         rte_free(ntuple_filter_ptr);
3336                 }
3337                 break;
3338         case RTE_ETH_FILTER_ETHERTYPE:
3339                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3340                                         pmd_flow->rule;
3341                 rte_memcpy(&ethertype_filter,
3342                         &ethertype_filter_ptr->filter_info,
3343                         sizeof(struct rte_eth_ethertype_filter));
3344                 ret = ixgbe_add_del_ethertype_filter(dev,
3345                                 &ethertype_filter, FALSE);
3346                 if (!ret) {
3347                         TAILQ_REMOVE(&filter_ethertype_list,
3348                                 ethertype_filter_ptr, entries);
3349                         rte_free(ethertype_filter_ptr);
3350                 }
3351                 break;
3352         case RTE_ETH_FILTER_SYN:
3353                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3354                                 pmd_flow->rule;
3355                 rte_memcpy(&syn_filter,
3356                         &syn_filter_ptr->filter_info,
3357                         sizeof(struct rte_eth_syn_filter));
3358                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3359                 if (!ret) {
3360                         TAILQ_REMOVE(&filter_syn_list,
3361                                 syn_filter_ptr, entries);
3362                         rte_free(syn_filter_ptr);
3363                 }
3364                 break;
3365         case RTE_ETH_FILTER_FDIR:
3366                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3367                 rte_memcpy(&fdir_rule,
3368                         &fdir_rule_ptr->filter_info,
3369                         sizeof(struct ixgbe_fdir_rule));
3370                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3371                 if (!ret) {
3372                         TAILQ_REMOVE(&filter_fdir_list,
3373                                 fdir_rule_ptr, entries);
3374                         rte_free(fdir_rule_ptr);
3375                         if (TAILQ_EMPTY(&filter_fdir_list))
3376                                 fdir_info->mask_added = false;
3377                 }
3378                 break;
3379         case RTE_ETH_FILTER_L2_TUNNEL:
3380                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3381                                 pmd_flow->rule;
3382                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3383                         sizeof(struct rte_eth_l2_tunnel_conf));
3384                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3385                 if (!ret) {
3386                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3387                                 l2_tn_filter_ptr, entries);
3388                         rte_free(l2_tn_filter_ptr);
3389                 }
3390                 break;
3391         case RTE_ETH_FILTER_HASH:
3392                 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3393                                 pmd_flow->rule;
3394                 ret = ixgbe_config_rss_filter(dev,
3395                                         &rss_filter_ptr->filter_info, FALSE);
3396                 if (!ret) {
3397                         TAILQ_REMOVE(&filter_rss_list,
3398                                 rss_filter_ptr, entries);
3399                         rte_free(rss_filter_ptr);
3400                 }
3401                 break;
3402         default:
3403                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3404                             filter_type);
3405                 ret = -EINVAL;
3406                 break;
3407         }
3408
3409         if (ret) {
3410                 rte_flow_error_set(error, EINVAL,
3411                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3412                                 NULL, "Failed to destroy flow");
3413                 return ret;
3414         }
3415
3416         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3417                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3418                         TAILQ_REMOVE(&ixgbe_flow_list,
3419                                 ixgbe_flow_mem_ptr, entries);
3420                         rte_free(ixgbe_flow_mem_ptr);
3421                 }
3422         }
3423         rte_free(flow);
3424
3425         return ret;
3426 }
3427
3428 /* Destroy all flow rules associated with a port on ixgbe. */
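/*
 * Hardware filters are cleared per type; the software lists are emptied by
 * ixgbe_filterlist_flush() at the end.
 */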
3429 static int
3430 ixgbe_flow_flush(struct rte_eth_dev *dev,
3431                 struct rte_flow_error *error)
3432 {
3433         int ret = 0;
3434
3435         ixgbe_clear_all_ntuple_filter(dev);
3436         ixgbe_clear_all_ethertype_filter(dev);
3437         ixgbe_clear_syn_filter(dev);
3438
3439         ret = ixgbe_clear_all_fdir_filter(dev);
3440         if (ret < 0) {
3441                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3442                                         NULL, "Failed to flush rule");
3443                 return ret;
3444         }
3445
3446         ret = ixgbe_clear_all_l2_tn_filter(dev);
3447         if (ret < 0) {
3448                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3449                                         NULL, "Failed to flush rule");
3450                 return ret;
3451         }
3452
3453         ixgbe_clear_rss_filter(dev);
3454
3455         ixgbe_filterlist_flush();
3456
3457         return 0;
3458 }
3459
3460 const struct rte_flow_ops ixgbe_flow_ops = {
3461         .validate = ixgbe_flow_validate,
3462         .create = ixgbe_flow_create,
3463         .destroy = ixgbe_flow_destroy,
3464         .flush = ixgbe_flow_flush,
3465 };