dpdk.git: drivers/net/ixgbe/ixgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cycles.h>
16
17 #include <rte_interrupts.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_pci.h>
21 #include <rte_atomic.h>
22 #include <rte_branch_prediction.h>
23 #include <rte_memory.h>
24 #include <rte_eal.h>
25 #include <rte_alarm.h>
26 #include <rte_ether.h>
27 #include <ethdev_driver.h>
28 #include <rte_malloc.h>
29 #include <rte_random.h>
30 #include <rte_dev.h>
31 #include <rte_hash_crc.h>
32 #include <rte_flow.h>
33 #include <rte_flow_driver.h>
34
35 #include "ixgbe_logs.h"
36 #include "base/ixgbe_api.h"
37 #include "base/ixgbe_vf.h"
38 #include "base/ixgbe_common.h"
39 #include "base/ixgbe_osdep.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct ixgbe_l2_tunnel_conf filter_info;
76 };
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79         TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80         struct ixgbe_rte_flow_rss_conf filter_info;
81 };
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84         TAILQ_ENTRY(ixgbe_flow_mem) entries;
85         struct rte_flow *flow;
86 };
87
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
95
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
103
104 /**
105  * An endless loop cannot happen, given the assumptions below:
106  * 1. there is at least one non-void item (END).
107  * 2. cur is before END.
108  */
109 static inline
110 const struct rte_flow_item *next_no_void_pattern(
111                 const struct rte_flow_item pattern[],
112                 const struct rte_flow_item *cur)
113 {
114         const struct rte_flow_item *next =
115                 cur ? cur + 1 : &pattern[0];
116         while (1) {
117                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
118                         return next;
119                 next++;
120         }
121 }
122
123 static inline
124 const struct rte_flow_action *next_no_void_action(
125                 const struct rte_flow_action actions[],
126                 const struct rte_flow_action *cur)
127 {
128         const struct rte_flow_action *next =
129                 cur ? cur + 1 : &actions[0];
130         while (1) {
131                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
132                         return next;
133                 next++;
134         }
135 }
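
/*
 * Editorial illustration, not part of the driver: the two helpers above rely
 * on the pattern/action arrays being terminated by an END entry, which is
 * what keeps their loops finite.  The hypothetical function below (its name
 * and use are illustrative only) shows the usual walk with
 * next_no_void_pattern().
 */
static inline unsigned int
ixgbe_flow_doc_count_items(const struct rte_flow_item pattern[])
{
        const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
        unsigned int n = 0;

        /* END is itself a non-void item, so the walk always stops there. */
        while (it->type != RTE_FLOW_ITEM_TYPE_END) {
                n++;
                it = next_no_void_pattern(pattern, it);
        }

        return n;
}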
136
137 /**
138  * Please be aware there is an assumption for all the parsers:
139  * rte_flow_item uses big endian, while rte_flow_attr and
140  * rte_flow_action use CPU (host) order.
141  * Because the pattern is used to describe packets,
142  * the packets normally use network byte order.
143  */
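
/*
 * Editorial illustration, not part of the driver: the byte-order convention
 * stated above.  Values written into item specs/masks are big endian, while
 * attribute and action fields use CPU order.  The function name, addresses
 * and queue index below are illustrative only.
 */
static inline void
ixgbe_flow_doc_byte_order_sketch(struct rte_flow_item_ipv4 *ipv4_spec,
                                 struct rte_flow_action_queue *queue_conf,
                                 struct rte_flow_attr *attr)
{
        /* Pattern spec: network (big-endian) order. */
        ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
        ipv4_spec->hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50));
        /* Attribute and action configuration: CPU order. */
        attr->priority = 1;
        queue_conf->index = 3;
}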
144
145 /**
146  * Parse the rule to see if it is an n-tuple rule,
147  * and extract the n-tuple filter info along the way.
148  * pattern:
149  * The first not void item can be ETH or IPV4.
150  * The second not void item must be IPV4 if the first one is ETH.
151  * The third not void item must be UDP, TCP or SCTP.
152  * The next not void item must be END.
153  * action:
154  * The first not void action should be QUEUE.
155  * The next not void action should be END.
156  * pattern example:
157  * ITEM         Spec                    Mask
158  * ETH          NULL                    NULL
159  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
160  *              dst_addr 192.167.3.50   0xFFFFFFFF
161  *              next_proto_id   17      0xFF
162  * UDP/TCP/     src_port        80      0xFFFF
163  * SCTP         dst_port        80      0xFFFF
164  * END
165  * Other members in mask and spec should be set to 0x00.
166  * item->last should be NULL.
167  *
168  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
169  *
170  */
171 static int
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173                          const struct rte_flow_item pattern[],
174                          const struct rte_flow_action actions[],
175                          struct rte_eth_ntuple_filter *filter,
176                          struct rte_flow_error *error)
177 {
178         const struct rte_flow_item *item;
179         const struct rte_flow_action *act;
180         const struct rte_flow_item_ipv4 *ipv4_spec;
181         const struct rte_flow_item_ipv4 *ipv4_mask;
182         const struct rte_flow_item_tcp *tcp_spec;
183         const struct rte_flow_item_tcp *tcp_mask;
184         const struct rte_flow_item_udp *udp_spec;
185         const struct rte_flow_item_udp *udp_mask;
186         const struct rte_flow_item_sctp *sctp_spec;
187         const struct rte_flow_item_sctp *sctp_mask;
188         const struct rte_flow_item_eth *eth_spec;
189         const struct rte_flow_item_eth *eth_mask;
190         const struct rte_flow_item_vlan *vlan_spec;
191         const struct rte_flow_item_vlan *vlan_mask;
192         struct rte_flow_item_eth eth_null;
193         struct rte_flow_item_vlan vlan_null;
194
195         if (!pattern) {
196                 rte_flow_error_set(error,
197                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198                         NULL, "NULL pattern.");
199                 return -rte_errno;
200         }
201
202         if (!actions) {
203                 rte_flow_error_set(error, EINVAL,
204                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205                                    NULL, "NULL action.");
206                 return -rte_errno;
207         }
208         if (!attr) {
209                 rte_flow_error_set(error, EINVAL,
210                                    RTE_FLOW_ERROR_TYPE_ATTR,
211                                    NULL, "NULL attribute.");
212                 return -rte_errno;
213         }
214
215         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
216         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
217
218 #ifdef RTE_LIB_SECURITY
219         /**
220          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
221          */
222         act = next_no_void_action(actions, NULL);
223         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224                 const void *conf = act->conf;
225                 /* check if the next not void item is END */
226                 act = next_no_void_action(actions, act);
227                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229                         rte_flow_error_set(error, EINVAL,
230                                 RTE_FLOW_ERROR_TYPE_ACTION,
231                                 act, "Not supported action.");
232                         return -rte_errno;
233                 }
234
235                 /* get the IP pattern*/
236                 item = next_no_void_pattern(pattern, NULL);
237                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
239                         if (item->last ||
240                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
241                                 rte_flow_error_set(error, EINVAL,
242                                         RTE_FLOW_ERROR_TYPE_ITEM,
243                                         item, "IP pattern missing.");
244                                 return -rte_errno;
245                         }
246                         item = next_no_void_pattern(pattern, item);
247                 }
248
249                 filter->proto = IPPROTO_ESP;
250                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
252         }
253 #endif
254
255         /* the first not void item can be MAC or IPv4 */
256         item = next_no_void_pattern(pattern, NULL);
257
258         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260                 rte_flow_error_set(error, EINVAL,
261                         RTE_FLOW_ERROR_TYPE_ITEM,
262                         item, "Not supported by ntuple filter");
263                 return -rte_errno;
264         }
265         /* Skip Ethernet */
266         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267                 eth_spec = item->spec;
268                 eth_mask = item->mask;
269                 /*Not supported last point for range*/
270                 if (item->last) {
271                         rte_flow_error_set(error,
272                           EINVAL,
273                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                           item, "Not supported last point for range");
275                         return -rte_errno;
276
277                 }
278                 /* if the first item is MAC, the content should be NULL */
279                 if ((item->spec || item->mask) &&
280                         (memcmp(eth_spec, &eth_null,
281                                 sizeof(struct rte_flow_item_eth)) ||
282                          memcmp(eth_mask, &eth_null,
283                                 sizeof(struct rte_flow_item_eth)))) {
284                         rte_flow_error_set(error, EINVAL,
285                                 RTE_FLOW_ERROR_TYPE_ITEM,
286                                 item, "Not supported by ntuple filter");
287                         return -rte_errno;
288                 }
289                 /* check if the next not void item is IPv4 or Vlan */
290                 item = next_no_void_pattern(pattern, item);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293                         rte_flow_error_set(error,
294                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295                           item, "Not supported by ntuple filter");
296                         return -rte_errno;
297                 }
298         }
299
300         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301                 vlan_spec = item->spec;
302                 vlan_mask = item->mask;
303                 /*Not supported last point for range*/
304                 if (item->last) {
305                         rte_flow_error_set(error,
306                           EINVAL,
307                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308                           item, "Not supported last point for range");
309                         return -rte_errno;
310                 }
311                 /* the content should be NULL */
312                 if ((item->spec || item->mask) &&
313                         (memcmp(vlan_spec, &vlan_null,
314                                 sizeof(struct rte_flow_item_vlan)) ||
315                          memcmp(vlan_mask, &vlan_null,
316                                 sizeof(struct rte_flow_item_vlan)))) {
317
318                         rte_flow_error_set(error, EINVAL,
319                                 RTE_FLOW_ERROR_TYPE_ITEM,
320                                 item, "Not supported by ntuple filter");
321                         return -rte_errno;
322                 }
323                 /* check if the next not void item is IPv4 */
324                 item = next_no_void_pattern(pattern, item);
325                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326                         rte_flow_error_set(error,
327                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328                           item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331         }
332
333         if (item->mask) {
334                 /* get the IPv4 info */
335                 if (!item->spec || !item->mask) {
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Invalid ntuple mask");
339                         return -rte_errno;
340                 }
341                 /*Not supported last point for range*/
342                 if (item->last) {
343                         rte_flow_error_set(error, EINVAL,
344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345                                 item, "Not supported last point for range");
346                         return -rte_errno;
347                 }
348
349                 ipv4_mask = item->mask;
350                 /**
351                  * Only support src & dst addresses, protocol,
352                  * others should be masked.
353                  */
354                 if (ipv4_mask->hdr.version_ihl ||
355                     ipv4_mask->hdr.type_of_service ||
356                     ipv4_mask->hdr.total_length ||
357                     ipv4_mask->hdr.packet_id ||
358                     ipv4_mask->hdr.fragment_offset ||
359                     ipv4_mask->hdr.time_to_live ||
360                     ipv4_mask->hdr.hdr_checksum) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366                 if ((ipv4_mask->hdr.src_addr != 0 &&
367                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
368                         (ipv4_mask->hdr.dst_addr != 0 &&
369                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
370                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
371                         ipv4_mask->hdr.next_proto_id != 0)) {
372                         rte_flow_error_set(error,
373                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
374                                 item, "Not supported by ntuple filter");
375                         return -rte_errno;
376                 }
377
378                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
379                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
380                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
381
382                 ipv4_spec = item->spec;
383                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
384                 filter->src_ip = ipv4_spec->hdr.src_addr;
385                 filter->proto  = ipv4_spec->hdr.next_proto_id;
386         }
387
388         /* check if the next not void item is TCP or UDP */
389         item = next_no_void_pattern(pattern, item);
390         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
391             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
392             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
393             item->type != RTE_FLOW_ITEM_TYPE_END) {
394                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
395                 rte_flow_error_set(error, EINVAL,
396                         RTE_FLOW_ERROR_TYPE_ITEM,
397                         item, "Not supported by ntuple filter");
398                 return -rte_errno;
399         }
400
401         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
402                 (!item->spec && !item->mask)) {
403                 goto action;
404         }
405
406         /* get the TCP/UDP/SCTP info */
407         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
408                 (!item->spec || !item->mask)) {
409                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410                 rte_flow_error_set(error, EINVAL,
411                         RTE_FLOW_ERROR_TYPE_ITEM,
412                         item, "Invalid ntuple mask");
413                 return -rte_errno;
414         }
415
416         /*Not supported last point for range*/
417         if (item->last) {
418                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
419                 rte_flow_error_set(error, EINVAL,
420                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
421                         item, "Not supported last point for range");
422                 return -rte_errno;
423
424         }
425
426         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
427                 tcp_mask = item->mask;
428
429                 /**
430                  * Only support src & dst ports, tcp flags,
431                  * others should be masked.
432                  */
433                 if (tcp_mask->hdr.sent_seq ||
434                     tcp_mask->hdr.recv_ack ||
435                     tcp_mask->hdr.data_off ||
436                     tcp_mask->hdr.rx_win ||
437                     tcp_mask->hdr.cksum ||
438                     tcp_mask->hdr.tcp_urp) {
439                         memset(filter, 0,
440                                 sizeof(struct rte_eth_ntuple_filter));
441                         rte_flow_error_set(error, EINVAL,
442                                 RTE_FLOW_ERROR_TYPE_ITEM,
443                                 item, "Not supported by ntuple filter");
444                         return -rte_errno;
445                 }
446                 if ((tcp_mask->hdr.src_port != 0 &&
447                         tcp_mask->hdr.src_port != UINT16_MAX) ||
448                         (tcp_mask->hdr.dst_port != 0 &&
449                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
450                         rte_flow_error_set(error,
451                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
452                                 item, "Not supported by ntuple filter");
453                         return -rte_errno;
454                 }
455
456                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
457                 filter->src_port_mask  = tcp_mask->hdr.src_port;
458                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
459                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
460                 } else if (!tcp_mask->hdr.tcp_flags) {
461                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
462                 } else {
463                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
464                         rte_flow_error_set(error, EINVAL,
465                                 RTE_FLOW_ERROR_TYPE_ITEM,
466                                 item, "Not supported by ntuple filter");
467                         return -rte_errno;
468                 }
469
470                 tcp_spec = item->spec;
471                 filter->dst_port  = tcp_spec->hdr.dst_port;
472                 filter->src_port  = tcp_spec->hdr.src_port;
473                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
474         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
475                 udp_mask = item->mask;
476
477                 /**
478                  * Only support src & dst ports,
479                  * others should be masked.
480                  */
481                 if (udp_mask->hdr.dgram_len ||
482                     udp_mask->hdr.dgram_cksum) {
483                         memset(filter, 0,
484                                 sizeof(struct rte_eth_ntuple_filter));
485                         rte_flow_error_set(error, EINVAL,
486                                 RTE_FLOW_ERROR_TYPE_ITEM,
487                                 item, "Not supported by ntuple filter");
488                         return -rte_errno;
489                 }
490                 if ((udp_mask->hdr.src_port != 0 &&
491                         udp_mask->hdr.src_port != UINT16_MAX) ||
492                         (udp_mask->hdr.dst_port != 0 &&
493                         udp_mask->hdr.dst_port != UINT16_MAX)) {
494                         rte_flow_error_set(error,
495                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
496                                 item, "Not supported by ntuple filter");
497                         return -rte_errno;
498                 }
499
500                 filter->dst_port_mask = udp_mask->hdr.dst_port;
501                 filter->src_port_mask = udp_mask->hdr.src_port;
502
503                 udp_spec = item->spec;
504                 filter->dst_port = udp_spec->hdr.dst_port;
505                 filter->src_port = udp_spec->hdr.src_port;
506         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
507                 sctp_mask = item->mask;
508
509                 /**
510                  * Only support src & dst ports,
511                  * others should be masked.
512                  */
513                 if (sctp_mask->hdr.tag ||
514                     sctp_mask->hdr.cksum) {
515                         memset(filter, 0,
516                                 sizeof(struct rte_eth_ntuple_filter));
517                         rte_flow_error_set(error, EINVAL,
518                                 RTE_FLOW_ERROR_TYPE_ITEM,
519                                 item, "Not supported by ntuple filter");
520                         return -rte_errno;
521                 }
522
523                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
524                 filter->src_port_mask = sctp_mask->hdr.src_port;
525
526                 sctp_spec = item->spec;
527                 filter->dst_port = sctp_spec->hdr.dst_port;
528                 filter->src_port = sctp_spec->hdr.src_port;
529         } else {
530                 goto action;
531         }
532
533         /* check if the next not void item is END */
534         item = next_no_void_pattern(pattern, item);
535         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
536                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
537                 rte_flow_error_set(error, EINVAL,
538                         RTE_FLOW_ERROR_TYPE_ITEM,
539                         item, "Not supported by ntuple filter");
540                 return -rte_errno;
541         }
542
543 action:
544
545         /**
546          * n-tuple only supports forwarding,
547          * check if the first not void action is QUEUE.
548          */
549         act = next_no_void_action(actions, NULL);
550         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
551                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
552                 rte_flow_error_set(error, EINVAL,
553                         RTE_FLOW_ERROR_TYPE_ACTION,
554                         act, "Not supported action.");
555                 return -rte_errno;
556         }
557         filter->queue =
558                 ((const struct rte_flow_action_queue *)act->conf)->index;
559
560         /* check if the next not void item is END */
561         act = next_no_void_action(actions, act);
562         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
563                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
564                 rte_flow_error_set(error, EINVAL,
565                         RTE_FLOW_ERROR_TYPE_ACTION,
566                         act, "Not supported action.");
567                 return -rte_errno;
568         }
569
570         /* parse attr */
571         /* must be input direction */
572         if (!attr->ingress) {
573                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
574                 rte_flow_error_set(error, EINVAL,
575                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
576                                    attr, "Only support ingress.");
577                 return -rte_errno;
578         }
579
580         /* not supported */
581         if (attr->egress) {
582                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
583                 rte_flow_error_set(error, EINVAL,
584                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
585                                    attr, "Not support egress.");
586                 return -rte_errno;
587         }
588
589         /* not supported */
590         if (attr->transfer) {
591                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
592                 rte_flow_error_set(error, EINVAL,
593                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
594                                    attr, "No support for transfer.");
595                 return -rte_errno;
596         }
597
598         if (attr->priority > 0xFFFF) {
599                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
600                 rte_flow_error_set(error, EINVAL,
601                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
602                                    attr, "Error priority.");
603                 return -rte_errno;
604         }
605         filter->priority = (uint16_t)attr->priority;
606         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
607             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
608             filter->priority = 1;
609
610         return 0;
611 }
612
613 /* A wrapper specific to ixgbe because the supported flags are device-specific. */
614 static int
615 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
616                           const struct rte_flow_attr *attr,
617                           const struct rte_flow_item pattern[],
618                           const struct rte_flow_action actions[],
619                           struct rte_eth_ntuple_filter *filter,
620                           struct rte_flow_error *error)
621 {
622         int ret;
623         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
624
625         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
626
627         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
628
629         if (ret)
630                 return ret;
631
632 #ifdef RTE_LIB_SECURITY
633         /* An ESP flow is not really a flow */
634         if (filter->proto == IPPROTO_ESP)
635                 return 0;
636 #endif
637
638         /* Ixgbe doesn't support TCP flags. */
639         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
640                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
641                 rte_flow_error_set(error, EINVAL,
642                                    RTE_FLOW_ERROR_TYPE_ITEM,
643                                    NULL, "Not supported by ntuple filter");
644                 return -rte_errno;
645         }
646
647         /* Ixgbe only supports a limited range of priorities. */
648         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
649             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
650                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
651                 rte_flow_error_set(error, EINVAL,
652                         RTE_FLOW_ERROR_TYPE_ITEM,
653                         NULL, "Priority not supported by ntuple filter");
654                 return -rte_errno;
655         }
656
657         if (filter->queue >= dev->data->nb_rx_queues)
658                 return -rte_errno;
659
660         /* fixed value for ixgbe */
661         filter->flags = RTE_5TUPLE_FLAGS;
662         return 0;
663 }
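
/*
 * Editorial usage sketch, not part of the driver: one way an application
 * could build the ETH / IPV4 / UDP + QUEUE rule described in the comment
 * above cons_parse_ntuple_filter().  The function name, addresses, port
 * numbers and queue index are illustrative only; error handling is omitted.
 */
static inline struct rte_flow *
ixgbe_flow_doc_ntuple_example(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = 1,
        };
        static const struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(UINT32_MAX),
                        .dst_addr = RTE_BE32(UINT32_MAX),
                        .next_proto_id = UINT8_MAX,
                },
        };
        const struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
                        .dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
                        .next_proto_id = IPPROTO_UDP,
                },
        };
        static const struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = RTE_BE16(UINT16_MAX),
                        .dst_port = RTE_BE16(UINT16_MAX),
                },
        };
        const struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* spec/mask left NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}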
664
665 /**
666  * Parse the rule to see if it is an ethertype rule,
667  * and extract the ethertype filter info along the way.
668  * pattern:
669  * The first not void item can be ETH.
670  * The next not void item must be END.
671  * action:
672  * The first not void action should be QUEUE.
673  * The next not void action should be END.
674  * pattern example:
675  * ITEM         Spec                    Mask
676  * ETH          type    0x0807          0xFFFF
677  * END
678  * Other members in mask and spec should be set to 0x00.
679  * item->last should be NULL.
680  */
681 static int
682 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
683                             const struct rte_flow_item *pattern,
684                             const struct rte_flow_action *actions,
685                             struct rte_eth_ethertype_filter *filter,
686                             struct rte_flow_error *error)
687 {
688         const struct rte_flow_item *item;
689         const struct rte_flow_action *act;
690         const struct rte_flow_item_eth *eth_spec;
691         const struct rte_flow_item_eth *eth_mask;
692         const struct rte_flow_action_queue *act_q;
693
694         if (!pattern) {
695                 rte_flow_error_set(error, EINVAL,
696                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
697                                 NULL, "NULL pattern.");
698                 return -rte_errno;
699         }
700
701         if (!actions) {
702                 rte_flow_error_set(error, EINVAL,
703                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
704                                 NULL, "NULL action.");
705                 return -rte_errno;
706         }
707
708         if (!attr) {
709                 rte_flow_error_set(error, EINVAL,
710                                    RTE_FLOW_ERROR_TYPE_ATTR,
711                                    NULL, "NULL attribute.");
712                 return -rte_errno;
713         }
714
715         item = next_no_void_pattern(pattern, NULL);
716         /* The first non-void item should be MAC. */
717         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
718                 rte_flow_error_set(error, EINVAL,
719                         RTE_FLOW_ERROR_TYPE_ITEM,
720                         item, "Not supported by ethertype filter");
721                 return -rte_errno;
722         }
723
724         /*Not supported last point for range*/
725         if (item->last) {
726                 rte_flow_error_set(error, EINVAL,
727                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
728                         item, "Not supported last point for range");
729                 return -rte_errno;
730         }
731
732         /* Get the MAC info. */
733         if (!item->spec || !item->mask) {
734                 rte_flow_error_set(error, EINVAL,
735                                 RTE_FLOW_ERROR_TYPE_ITEM,
736                                 item, "Not supported by ethertype filter");
737                 return -rte_errno;
738         }
739
740         eth_spec = item->spec;
741         eth_mask = item->mask;
742
743         /* Mask bits of source MAC address must be full of 0.
744          * Mask bits of destination MAC address must be full
745          * of 1 or full of 0.
746          */
747         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
748             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
749              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ITEM,
752                                 item, "Invalid ether address mask");
753                 return -rte_errno;
754         }
755
756         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
757                 rte_flow_error_set(error, EINVAL,
758                                 RTE_FLOW_ERROR_TYPE_ITEM,
759                                 item, "Invalid ethertype mask");
760                 return -rte_errno;
761         }
762
763         /* If mask bits of destination MAC address
764          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
765          */
766         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
767                 filter->mac_addr = eth_spec->dst;
768                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
769         } else {
770                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
771         }
772         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
773
774         /* Check if the next non-void item is END. */
775         item = next_no_void_pattern(pattern, item);
776         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
777                 rte_flow_error_set(error, EINVAL,
778                                 RTE_FLOW_ERROR_TYPE_ITEM,
779                                 item, "Not supported by ethertype filter.");
780                 return -rte_errno;
781         }
782
783         /* Parse action */
784
785         act = next_no_void_action(actions, NULL);
786         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
787             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
788                 rte_flow_error_set(error, EINVAL,
789                                 RTE_FLOW_ERROR_TYPE_ACTION,
790                                 act, "Not supported action.");
791                 return -rte_errno;
792         }
793
794         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
795                 act_q = (const struct rte_flow_action_queue *)act->conf;
796                 filter->queue = act_q->index;
797         } else {
798                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
799         }
800
801         /* Check if the next non-void item is END */
802         act = next_no_void_action(actions, act);
803         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
804                 rte_flow_error_set(error, EINVAL,
805                                 RTE_FLOW_ERROR_TYPE_ACTION,
806                                 act, "Not supported action.");
807                 return -rte_errno;
808         }
809
810         /* Parse attr */
811         /* Must be input direction */
812         if (!attr->ingress) {
813                 rte_flow_error_set(error, EINVAL,
814                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
815                                 attr, "Only support ingress.");
816                 return -rte_errno;
817         }
818
819         /* Not supported */
820         if (attr->egress) {
821                 rte_flow_error_set(error, EINVAL,
822                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
823                                 attr, "Not support egress.");
824                 return -rte_errno;
825         }
826
827         /* Not supported */
828         if (attr->transfer) {
829                 rte_flow_error_set(error, EINVAL,
830                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
831                                 attr, "No support for transfer.");
832                 return -rte_errno;
833         }
834
835         /* Not supported */
836         if (attr->priority) {
837                 rte_flow_error_set(error, EINVAL,
838                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
839                                 attr, "Not support priority.");
840                 return -rte_errno;
841         }
842
843         /* Not supported */
844         if (attr->group) {
845                 rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
847                                 attr, "Not support group.");
848                 return -rte_errno;
849         }
850
851         return 0;
852 }
853
854 static int
855 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
856                                  const struct rte_flow_attr *attr,
857                              const struct rte_flow_item pattern[],
858                              const struct rte_flow_action actions[],
859                              struct rte_eth_ethertype_filter *filter,
860                              struct rte_flow_error *error)
861 {
862         int ret;
863         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
864
865         MAC_TYPE_FILTER_SUP(hw->mac.type);
866
867         ret = cons_parse_ethertype_filter(attr, pattern,
868                                         actions, filter, error);
869
870         if (ret)
871                 return ret;
872
873         if (filter->queue >= dev->data->nb_rx_queues) {
874                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
875                 rte_flow_error_set(error, EINVAL,
876                         RTE_FLOW_ERROR_TYPE_ITEM,
877                         NULL, "queue index much too big");
878                 return -rte_errno;
879         }
880
881         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
882                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
883                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
884                 rte_flow_error_set(error, EINVAL,
885                         RTE_FLOW_ERROR_TYPE_ITEM,
886                         NULL, "IPv4/IPv6 not supported by ethertype filter");
887                 return -rte_errno;
888         }
889
890         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
891                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
892                 rte_flow_error_set(error, EINVAL,
893                         RTE_FLOW_ERROR_TYPE_ITEM,
894                         NULL, "mac compare is unsupported");
895                 return -rte_errno;
896         }
897
898         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
899                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
900                 rte_flow_error_set(error, EINVAL,
901                         RTE_FLOW_ERROR_TYPE_ITEM,
902                         NULL, "drop option is unsupported");
903                 return -rte_errno;
904         }
905
906         return 0;
907 }
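
/*
 * Editorial usage sketch, not part of the driver: an ethertype rule of the
 * shape documented above cons_parse_ethertype_filter(), steering one
 * EtherType to a queue.  The function name, the 0x0807 type value (taken
 * from the comment above) and the port/queue ids are illustrative only.
 */
static inline int
ixgbe_flow_doc_ethertype_example(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec;
        struct rte_flow_item_eth eth_mask;
        struct rte_flow_item pattern[2];
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* MAC addresses stay all-zero: the ixgbe parser rejects MAC compare. */
        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        eth_spec.type = rte_cpu_to_be_16(0x0807);
        eth_mask.type = rte_cpu_to_be_16(0xFFFF);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
        pattern[0].spec = &eth_spec;
        pattern[0].mask = &eth_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}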
908
909 /**
910  * Parse the rule to see if it is a TCP SYN rule,
911  * and extract the TCP SYN filter info along the way.
912  * pattern:
913  * The first not void item must be ETH.
914  * The second not void item must be IPV4 or IPV6.
915  * The third not void item must be TCP.
916  * The next not void item must be END.
917  * action:
918  * The first not void action should be QUEUE.
919  * The next not void action should be END.
920  * pattern example:
921  * ITEM         Spec                    Mask
922  * ETH          NULL                    NULL
923  * IPV4/IPV6    NULL                    NULL
924  * TCP          tcp_flags       0x02    0x02
925  * END
926  * Other members in mask and spec should be set to 0x00.
927  * item->last should be NULL.
928  */
929 static int
930 cons_parse_syn_filter(const struct rte_flow_attr *attr,
931                                 const struct rte_flow_item pattern[],
932                                 const struct rte_flow_action actions[],
933                                 struct rte_eth_syn_filter *filter,
934                                 struct rte_flow_error *error)
935 {
936         const struct rte_flow_item *item;
937         const struct rte_flow_action *act;
938         const struct rte_flow_item_tcp *tcp_spec;
939         const struct rte_flow_item_tcp *tcp_mask;
940         const struct rte_flow_action_queue *act_q;
941
942         if (!pattern) {
943                 rte_flow_error_set(error, EINVAL,
944                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
945                                 NULL, "NULL pattern.");
946                 return -rte_errno;
947         }
948
949         if (!actions) {
950                 rte_flow_error_set(error, EINVAL,
951                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
952                                 NULL, "NULL action.");
953                 return -rte_errno;
954         }
955
956         if (!attr) {
957                 rte_flow_error_set(error, EINVAL,
958                                    RTE_FLOW_ERROR_TYPE_ATTR,
959                                    NULL, "NULL attribute.");
960                 return -rte_errno;
961         }
962
963
964         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
965         item = next_no_void_pattern(pattern, NULL);
966         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
967             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
968             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
969             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
970                 rte_flow_error_set(error, EINVAL,
971                                 RTE_FLOW_ERROR_TYPE_ITEM,
972                                 item, "Not supported by syn filter");
973                 return -rte_errno;
974         }
975         /*Not supported last point for range*/
976         if (item->last) {
977                 rte_flow_error_set(error, EINVAL,
978                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
979                         item, "Not supported last point for range");
980                 return -rte_errno;
981         }
982
983         /* Skip Ethernet */
984         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
985                 /* if the item is MAC, the content should be NULL */
986                 if (item->spec || item->mask) {
987                         rte_flow_error_set(error, EINVAL,
988                                 RTE_FLOW_ERROR_TYPE_ITEM,
989                                 item, "Invalid SYN address mask");
990                         return -rte_errno;
991                 }
992
993                 /* check if the next not void item is IPv4 or IPv6 */
994                 item = next_no_void_pattern(pattern, item);
995                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
996                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
997                         rte_flow_error_set(error, EINVAL,
998                                 RTE_FLOW_ERROR_TYPE_ITEM,
999                                 item, "Not supported by syn filter");
1000                         return -rte_errno;
1001                 }
1002         }
1003
1004         /* Skip IP */
1005         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1006             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1007                 /* if the item is IP, the content should be NULL */
1008                 if (item->spec || item->mask) {
1009                         rte_flow_error_set(error, EINVAL,
1010                                 RTE_FLOW_ERROR_TYPE_ITEM,
1011                                 item, "Invalid SYN mask");
1012                         return -rte_errno;
1013                 }
1014
1015                 /* check if the next not void item is TCP */
1016                 item = next_no_void_pattern(pattern, item);
1017                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
1018                         rte_flow_error_set(error, EINVAL,
1019                                 RTE_FLOW_ERROR_TYPE_ITEM,
1020                                 item, "Not supported by syn filter");
1021                         return -rte_errno;
1022                 }
1023         }
1024
1025         /* Get the TCP info. Only support SYN. */
1026         if (!item->spec || !item->mask) {
1027                 rte_flow_error_set(error, EINVAL,
1028                                 RTE_FLOW_ERROR_TYPE_ITEM,
1029                                 item, "Invalid SYN mask");
1030                 return -rte_errno;
1031         }
1032         /*Not supported last point for range*/
1033         if (item->last) {
1034                 rte_flow_error_set(error, EINVAL,
1035                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1036                         item, "Not supported last point for range");
1037                 return -rte_errno;
1038         }
1039
1040         tcp_spec = item->spec;
1041         tcp_mask = item->mask;
1042         if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1043             tcp_mask->hdr.src_port ||
1044             tcp_mask->hdr.dst_port ||
1045             tcp_mask->hdr.sent_seq ||
1046             tcp_mask->hdr.recv_ack ||
1047             tcp_mask->hdr.data_off ||
1048             tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1049             tcp_mask->hdr.rx_win ||
1050             tcp_mask->hdr.cksum ||
1051             tcp_mask->hdr.tcp_urp) {
1052                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1053                 rte_flow_error_set(error, EINVAL,
1054                                 RTE_FLOW_ERROR_TYPE_ITEM,
1055                                 item, "Not supported by syn filter");
1056                 return -rte_errno;
1057         }
1058
1059         /* check if the next not void item is END */
1060         item = next_no_void_pattern(pattern, item);
1061         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1062                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1063                 rte_flow_error_set(error, EINVAL,
1064                                 RTE_FLOW_ERROR_TYPE_ITEM,
1065                                 item, "Not supported by syn filter");
1066                 return -rte_errno;
1067         }
1068
1069         /* check if the first not void action is QUEUE. */
1070         act = next_no_void_action(actions, NULL);
1071         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1072                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1073                 rte_flow_error_set(error, EINVAL,
1074                                 RTE_FLOW_ERROR_TYPE_ACTION,
1075                                 act, "Not supported action.");
1076                 return -rte_errno;
1077         }
1078
1079         act_q = (const struct rte_flow_action_queue *)act->conf;
1080         filter->queue = act_q->index;
1081         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1082                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1083                 rte_flow_error_set(error, EINVAL,
1084                                 RTE_FLOW_ERROR_TYPE_ACTION,
1085                                 act, "Not supported action.");
1086                 return -rte_errno;
1087         }
1088
1089         /* check if the next not void item is END */
1090         act = next_no_void_action(actions, act);
1091         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1092                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1093                 rte_flow_error_set(error, EINVAL,
1094                                 RTE_FLOW_ERROR_TYPE_ACTION,
1095                                 act, "Not supported action.");
1096                 return -rte_errno;
1097         }
1098
1099         /* parse attr */
1100         /* must be input direction */
1101         if (!attr->ingress) {
1102                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1103                 rte_flow_error_set(error, EINVAL,
1104                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1105                         attr, "Only support ingress.");
1106                 return -rte_errno;
1107         }
1108
1109         /* not supported */
1110         if (attr->egress) {
1111                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1112                 rte_flow_error_set(error, EINVAL,
1113                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1114                         attr, "Not support egress.");
1115                 return -rte_errno;
1116         }
1117
1118         /* not supported */
1119         if (attr->transfer) {
1120                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1121                 rte_flow_error_set(error, EINVAL,
1122                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1123                         attr, "No support for transfer.");
1124                 return -rte_errno;
1125         }
1126
1127         /* Support 2 priorities, the lowest or highest. */
1128         if (!attr->priority) {
1129                 filter->hig_pri = 0;
1130         } else if (attr->priority == (uint32_t)~0U) {
1131                 filter->hig_pri = 1;
1132         } else {
1133                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1134                 rte_flow_error_set(error, EINVAL,
1135                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1136                         attr, "Not support priority.");
1137                 return -rte_errno;
1138         }
1139
1140         return 0;
1141 }
1142
1143 static int
1144 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1145                                  const struct rte_flow_attr *attr,
1146                              const struct rte_flow_item pattern[],
1147                              const struct rte_flow_action actions[],
1148                              struct rte_eth_syn_filter *filter,
1149                              struct rte_flow_error *error)
1150 {
1151         int ret;
1152         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1153
1154         MAC_TYPE_FILTER_SUP(hw->mac.type);
1155
1156         ret = cons_parse_syn_filter(attr, pattern,
1157                                         actions, filter, error);
1158
1159         if (filter->queue >= dev->data->nb_rx_queues)
1160                 return -rte_errno;
1161
1162         if (ret)
1163                 return ret;
1164
1165         return 0;
1166 }
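
/*
 * Editorial usage sketch, not part of the driver: a TCP SYN rule of the
 * shape documented above cons_parse_syn_filter() - empty ETH and IPV4
 * items, a TCP item matching only the SYN flag, and a QUEUE action.
 * The function name and the port/queue ids are illustrative only.
 */
static inline int
ixgbe_flow_doc_syn_example(uint16_t port_id, struct rte_flow_error *err)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp tcp_spec;
        struct rte_flow_item_tcp tcp_mask;
        struct rte_flow_item pattern[4];
        static const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Both spec and mask carry only the SYN bit; everything else is 0. */
        memset(&tcp_spec, 0, sizeof(tcp_spec));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        tcp_spec.hdr.tcp_flags = RTE_TCP_SYN_FLAG;
        tcp_mask.hdr.tcp_flags = RTE_TCP_SYN_FLAG;

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;       /* spec/mask left NULL */
        pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;      /* spec/mask left NULL */
        pattern[2].type = RTE_FLOW_ITEM_TYPE_TCP;
        pattern[2].spec = &tcp_spec;
        pattern[2].mask = &tcp_mask;
        pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}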
1167
1168 /**
1169  * Parse the rule to see if it is an L2 tunnel rule,
1170  * and extract the L2 tunnel filter info along the way.
1171  * Only E-tag is supported for now.
1172  * pattern:
1173  * The first not void item can be E_TAG.
1174  * The next not void item must be END.
1175  * action:
1176  * The first not void action should be VF or PF.
1177  * The next not void action should be END.
1178  * pattern example:
1179  * ITEM         Spec                    Mask
1180  * E_TAG        grp             0x1     0x3
1181  *              e_cid_base      0x309   0xFFF
1182  * END
1183  * Other members in mask and spec should be set to 0x00.
1184  * item->last should be NULL.
1185  */
1186 static int
1187 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1188                         const struct rte_flow_attr *attr,
1189                         const struct rte_flow_item pattern[],
1190                         const struct rte_flow_action actions[],
1191                         struct ixgbe_l2_tunnel_conf *filter,
1192                         struct rte_flow_error *error)
1193 {
1194         const struct rte_flow_item *item;
1195         const struct rte_flow_item_e_tag *e_tag_spec;
1196         const struct rte_flow_item_e_tag *e_tag_mask;
1197         const struct rte_flow_action *act;
1198         const struct rte_flow_action_vf *act_vf;
1199         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1200
1201         if (!pattern) {
1202                 rte_flow_error_set(error, EINVAL,
1203                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1204                         NULL, "NULL pattern.");
1205                 return -rte_errno;
1206         }
1207
1208         if (!actions) {
1209                 rte_flow_error_set(error, EINVAL,
1210                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1211                                    NULL, "NULL action.");
1212                 return -rte_errno;
1213         }
1214
1215         if (!attr) {
1216                 rte_flow_error_set(error, EINVAL,
1217                                    RTE_FLOW_ERROR_TYPE_ATTR,
1218                                    NULL, "NULL attribute.");
1219                 return -rte_errno;
1220         }
1221
1222         /* The first not void item should be e-tag. */
1223         item = next_no_void_pattern(pattern, NULL);
1224         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1225                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1226                 rte_flow_error_set(error, EINVAL,
1227                         RTE_FLOW_ERROR_TYPE_ITEM,
1228                         item, "Not supported by L2 tunnel filter");
1229                 return -rte_errno;
1230         }
1231
1232         if (!item->spec || !item->mask) {
1233                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1234                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1235                         item, "Not supported by L2 tunnel filter");
1236                 return -rte_errno;
1237         }
1238
1239         /*Not supported last point for range*/
1240         if (item->last) {
1241                 rte_flow_error_set(error, EINVAL,
1242                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1243                         item, "Not supported last point for range");
1244                 return -rte_errno;
1245         }
1246
1247         e_tag_spec = item->spec;
1248         e_tag_mask = item->mask;
1249
1250         /* Only care about GRP and E cid base. */
1251         if (e_tag_mask->epcp_edei_in_ecid_b ||
1252             e_tag_mask->in_ecid_e ||
1253             e_tag_mask->ecid_e ||
1254             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1255                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1256                 rte_flow_error_set(error, EINVAL,
1257                         RTE_FLOW_ERROR_TYPE_ITEM,
1258                         item, "Not supported by L2 tunnel filter");
1259                 return -rte_errno;
1260         }
1261
1262         filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
1263         /**
1264          * grp and e_cid_base are bit fields and only use 14 bits.
1265          * e-tag id is taken as little endian by HW.
1266          */
1267         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1268
1269         /* check if the next not void item is END */
1270         item = next_no_void_pattern(pattern, item);
1271         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1272                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1273                 rte_flow_error_set(error, EINVAL,
1274                         RTE_FLOW_ERROR_TYPE_ITEM,
1275                         item, "Not supported by L2 tunnel filter");
1276                 return -rte_errno;
1277         }
1278
1279         /* parse attr */
1280         /* must be input direction */
1281         if (!attr->ingress) {
1282                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1283                 rte_flow_error_set(error, EINVAL,
1284                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1285                         attr, "Only support ingress.");
1286                 return -rte_errno;
1287         }
1288
1289         /* not supported */
1290         if (attr->egress) {
1291                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1292                 rte_flow_error_set(error, EINVAL,
1293                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1294                         attr, "Not support egress.");
1295                 return -rte_errno;
1296         }
1297
1298         /* not supported */
1299         if (attr->transfer) {
1300                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1301                 rte_flow_error_set(error, EINVAL,
1302                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1303                         attr, "No support for transfer.");
1304                 return -rte_errno;
1305         }
1306
1307         /* not supported */
1308         if (attr->priority) {
1309                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1310                 rte_flow_error_set(error, EINVAL,
1311                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1312                         attr, "Not support priority.");
1313                 return -rte_errno;
1314         }
1315
1316         /* check if the first not void action is VF or PF. */
1317         act = next_no_void_action(actions, NULL);
1318         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1319                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1320                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1321                 rte_flow_error_set(error, EINVAL,
1322                         RTE_FLOW_ERROR_TYPE_ACTION,
1323                         act, "Not supported action.");
1324                 return -rte_errno;
1325         }
1326
1327         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1328                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1329                 filter->pool = act_vf->id;
1330         } else {
1331                 filter->pool = pci_dev->max_vfs;
1332         }
1333
1334         /* check if the next not void action is END */
1335         act = next_no_void_action(actions, act);
1336         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1337                 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1338                 rte_flow_error_set(error, EINVAL,
1339                         RTE_FLOW_ERROR_TYPE_ACTION,
1340                         act, "Not supported action.");
1341                 return -rte_errno;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static int
1348 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1349                         const struct rte_flow_attr *attr,
1350                         const struct rte_flow_item pattern[],
1351                         const struct rte_flow_action actions[],
1352                         struct ixgbe_l2_tunnel_conf *l2_tn_filter,
1353                         struct rte_flow_error *error)
1354 {
1355         int ret = 0;
1356         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1357         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1358         uint16_t vf_num;
1359
1360         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1361                                 actions, l2_tn_filter, error);
1362
1363         if (hw->mac.type != ixgbe_mac_X550 &&
1364                 hw->mac.type != ixgbe_mac_X550EM_x &&
1365                 hw->mac.type != ixgbe_mac_X550EM_a) {
1366                 memset(l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1367                 rte_flow_error_set(error, EINVAL,
1368                         RTE_FLOW_ERROR_TYPE_ITEM,
1369                         NULL, "Not supported by L2 tunnel filter");
1370                 return -rte_errno;
1371         }
1372
1373         vf_num = pci_dev->max_vfs;
1374
1375         if (l2_tn_filter->pool > vf_num)
1376                 return -rte_errno;
1377
1378         return ret;
1379 }
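
/*
 * Illustrative sketch, not part of the driver: one way an application could
 * phrase an E-tag rule that the L2 tunnel parser above accepts.  The port,
 * VF id and GRP/E-CID value below are hypothetical; only rsvd_grp_ecid_b may
 * be set in the mask (and must cover the full 14 bits), and the action must
 * be VF or PF.
 */
static __rte_unused int
ixgbe_flow_example_l2_tn(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = RTE_BE16(0x1234), /* GRP + E-CID base */
	};
	static const struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = RTE_BE16(0x3FFF), /* full 14-bit mask */
	};
	static const struct rte_flow_action_vf vf = { .id = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error) ?
		0 : -rte_errno;
}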
1380
1381 /* Parse the attr and action info of a flow director rule. */
1382 static int
1383 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1384                           const struct rte_flow_action actions[],
1385                           struct ixgbe_fdir_rule *rule,
1386                           struct rte_flow_error *error)
1387 {
1388         const struct rte_flow_action *act;
1389         const struct rte_flow_action_queue *act_q;
1390         const struct rte_flow_action_mark *mark;
1391
1392         /* parse attr */
1393         /* must be input direction */
1394         if (!attr->ingress) {
1395                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1396                 rte_flow_error_set(error, EINVAL,
1397                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1398                         attr, "Only support ingress.");
1399                 return -rte_errno;
1400         }
1401
1402         /* not supported */
1403         if (attr->egress) {
1404                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1405                 rte_flow_error_set(error, EINVAL,
1406                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1407                         attr, "Not support egress.");
1408                 return -rte_errno;
1409         }
1410
1411         /* not supported */
1412         if (attr->transfer) {
1413                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414                 rte_flow_error_set(error, EINVAL,
1415                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1416                         attr, "No support for transfer.");
1417                 return -rte_errno;
1418         }
1419
1420         /* not supported */
1421         if (attr->priority) {
1422                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1423                 rte_flow_error_set(error, EINVAL,
1424                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1425                         attr, "Not support priority.");
1426                 return -rte_errno;
1427         }
1428
1429         /* check if the first not void action is QUEUE or DROP. */
1430         act = next_no_void_action(actions, NULL);
1431         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1432             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1433                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1434                 rte_flow_error_set(error, EINVAL,
1435                         RTE_FLOW_ERROR_TYPE_ACTION,
1436                         act, "Not supported action.");
1437                 return -rte_errno;
1438         }
1439
1440         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1441                 act_q = (const struct rte_flow_action_queue *)act->conf;
1442                 rule->queue = act_q->index;
1443         } else { /* drop */
1444                 /* signature mode does not support drop action. */
1445                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1446                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1447                         rte_flow_error_set(error, EINVAL,
1448                                 RTE_FLOW_ERROR_TYPE_ACTION,
1449                                 act, "Not supported action.");
1450                         return -rte_errno;
1451                 }
1452                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1453         }
1454
1455         /* check if the next not void action is MARK or END */
1456         act = next_no_void_action(actions, act);
1457         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1458                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1459                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1460                 rte_flow_error_set(error, EINVAL,
1461                         RTE_FLOW_ERROR_TYPE_ACTION,
1462                         act, "Not supported action.");
1463                 return -rte_errno;
1464         }
1465
1466         rule->soft_id = 0;
1467
1468         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1469                 mark = (const struct rte_flow_action_mark *)act->conf;
1470                 rule->soft_id = mark->id;
1471                 act = next_no_void_action(actions, act);
1472         }
1473
1474         /* check if the next not void action is END */
1475         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1476                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1477                 rte_flow_error_set(error, EINVAL,
1478                         RTE_FLOW_ERROR_TYPE_ACTION,
1479                         act, "Not supported action.");
1480                 return -rte_errno;
1481         }
1482
1483         return 0;
1484 }
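
/*
 * Illustrative sketch, not part of the driver: an action list that
 * ixgbe_parse_fdir_act_attr() above accepts.  The queue index and mark id
 * are hypothetical; MARK is optional, and DROP may replace QUEUE outside
 * signature mode.
 */
static __rte_unused const struct rte_flow_action_queue ixgbe_example_fdir_queue = {
	.index = 3,
};
static __rte_unused const struct rte_flow_action_mark ixgbe_example_fdir_mark = {
	.id = 0xbeef,
};
static __rte_unused const struct rte_flow_action ixgbe_example_fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &ixgbe_example_fdir_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &ixgbe_example_fdir_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};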
1485
1486 /* Search the next not void pattern item, skipping any FUZZY items. */
1487 static inline
1488 const struct rte_flow_item *next_no_fuzzy_pattern(
1489                 const struct rte_flow_item pattern[],
1490                 const struct rte_flow_item *cur)
1491 {
1492         const struct rte_flow_item *next =
1493                 next_no_void_pattern(pattern, cur);
1494         while (1) {
1495                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1496                         return next;
1497                 next = next_no_void_pattern(pattern, next);
1498         }
1499 }
1500
1501 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1502 {
1503         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1504         const struct rte_flow_item *item;
1505         uint32_t sh, lh, mh;
1506         int i = 0;
1507
1508         while (1) {
1509                 item = pattern + i;
1510                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1511                         break;
1512
1513                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1514                         spec = item->spec;
1515                         last = item->last;
1516                         mask = item->mask;
1517
1518                         if (!spec || !mask)
1519                                 return 0;
1520
1521                         sh = spec->thresh;
1522
1523                         if (!last)
1524                                 lh = sh;
1525                         else
1526                                 lh = last->thresh;
1527
1528                         mh = mask->thresh;
1529                         sh = sh & mh;
1530                         lh = lh & mh;
1531
1532                         if (!sh || sh > lh)
1533                                 return 0;
1534
1535                         return 1;
1536                 }
1537
1538                 i++;
1539         }
1540
1541         return 0;
1542 }
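
/*
 * Illustrative sketch, not part of the driver: a FUZZY item that makes
 * signature_match() above return 1 and therefore selects signature mode.
 * The threshold value is hypothetical; it only has to be non-zero under the
 * mask and not exceed item->last when a range is given.
 */
static __rte_unused const struct rte_flow_item_fuzzy ixgbe_example_fuzzy_spec = {
	.thresh = 1,
};
static __rte_unused const struct rte_flow_item_fuzzy ixgbe_example_fuzzy_mask = {
	.thresh = UINT32_MAX,
};
static __rte_unused const struct rte_flow_item ixgbe_example_fuzzy_item = {
	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
	.spec = &ixgbe_example_fuzzy_spec,
	.mask = &ixgbe_example_fuzzy_mask,
};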
1543
1544 /**
1545  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1546  * and fill in the flow director filter info along the way.
1547  * UDP/TCP/SCTP PATTERN:
1548  * The first not void item can be ETH or IPV4 or IPV6
1549  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1550  * The next not void item could be UDP or TCP or SCTP (optional)
1551  * The next not void item could be RAW (for flexbyte, optional)
1552  * The next not void item must be END.
1553  * A Fuzzy Match pattern can appear at any place before END.
1554  * Fuzzy Match is optional for IPV4 but is required for IPV6
1555  * MAC VLAN PATTERN:
1556  * The first not void item must be ETH.
1557  * The second not void item must be MAC VLAN.
1558  * The next not void item must be END.
1559  * ACTION:
1560  * The first not void action should be QUEUE or DROP.
1561  * The second not void optional action should be MARK,
1562  * mark_id is a uint32_t number.
1563  * The next not void action should be END.
1564  * UDP/TCP/SCTP pattern example:
1565  * ITEM         Spec                    Mask
1566  * ETH          NULL                    NULL
1567  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1568  *              dst_addr 192.167.3.50   0xFFFFFFFF
1569  * UDP/TCP/SCTP src_port        80      0xFFFF
1570  *              dst_port        80      0xFFFF
1571  * FLEX relative        0       0x1
1572  *              search          0       0x1
1573  *              reserved        0       0
1574  *              offset          12      0xFFFFFFFF
1575  *              limit           0       0xFFFF
1576  *              length          2       0xFFFF
1577  *              pattern[0]      0x86    0xFF
1578  *              pattern[1]      0xDD    0xFF
1579  * END
1580  * MAC VLAN pattern example:
1581  * ITEM         Spec                    Mask
1582  * ETH          dst_addr
1583                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1584                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1585  * MAC VLAN     tci     0x2016          0xEFFF
1586  * END
1587  * Other members in mask and spec should be set to 0x00.
1588  * Item->last should be NULL.
1589  */
1590 static int
1591 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1592                                const struct rte_flow_attr *attr,
1593                                const struct rte_flow_item pattern[],
1594                                const struct rte_flow_action actions[],
1595                                struct ixgbe_fdir_rule *rule,
1596                                struct rte_flow_error *error)
1597 {
1598         const struct rte_flow_item *item;
1599         const struct rte_flow_item_eth *eth_spec;
1600         const struct rte_flow_item_eth *eth_mask;
1601         const struct rte_flow_item_ipv4 *ipv4_spec;
1602         const struct rte_flow_item_ipv4 *ipv4_mask;
1603         const struct rte_flow_item_ipv6 *ipv6_spec;
1604         const struct rte_flow_item_ipv6 *ipv6_mask;
1605         const struct rte_flow_item_tcp *tcp_spec;
1606         const struct rte_flow_item_tcp *tcp_mask;
1607         const struct rte_flow_item_udp *udp_spec;
1608         const struct rte_flow_item_udp *udp_mask;
1609         const struct rte_flow_item_sctp *sctp_spec;
1610         const struct rte_flow_item_sctp *sctp_mask;
1611         const struct rte_flow_item_vlan *vlan_spec;
1612         const struct rte_flow_item_vlan *vlan_mask;
1613         const struct rte_flow_item_raw *raw_mask;
1614         const struct rte_flow_item_raw *raw_spec;
1615         uint8_t j;
1616
1617         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1618
1619         if (!pattern) {
1620                 rte_flow_error_set(error, EINVAL,
1621                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1622                         NULL, "NULL pattern.");
1623                 return -rte_errno;
1624         }
1625
1626         if (!actions) {
1627                 rte_flow_error_set(error, EINVAL,
1628                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1629                                    NULL, "NULL action.");
1630                 return -rte_errno;
1631         }
1632
1633         if (!attr) {
1634                 rte_flow_error_set(error, EINVAL,
1635                                    RTE_FLOW_ERROR_TYPE_ATTR,
1636                                    NULL, "NULL attribute.");
1637                 return -rte_errno;
1638         }
1639
1640         /**
1641          * Some fields may not be provided. Set the spec to 0 and the mask to the
1642          * default value, so nothing needs to be done later for the missing fields.
1643          */
1644         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1645         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1646         rule->mask.vlan_tci_mask = 0;
1647         rule->mask.flex_bytes_mask = 0;
1648
1649         /**
1650          * The first not void item should be
1651          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1652          */
1653         item = next_no_fuzzy_pattern(pattern, NULL);
1654         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1655             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1656             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1657             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1658             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1659             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1660                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1661                 rte_flow_error_set(error, EINVAL,
1662                         RTE_FLOW_ERROR_TYPE_ITEM,
1663                         item, "Not supported by fdir filter");
1664                 return -rte_errno;
1665         }
1666
1667         if (signature_match(pattern))
1668                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1669         else
1670                 rule->mode = RTE_FDIR_MODE_PERFECT;
1671
1672         /* Ranges (item->last) are not supported */
1673         if (item->last) {
1674                 rte_flow_error_set(error, EINVAL,
1675                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1676                         item, "Not supported last point for range");
1677                 return -rte_errno;
1678         }
1679
1680         /* Get the MAC info. */
1681         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1682                 /**
1683                  * Only VLAN and the dst MAC address are supported;
1684                  * everything else should be masked.
1685                  */
1686                 if (item->spec && !item->mask) {
1687                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1688                         rte_flow_error_set(error, EINVAL,
1689                                 RTE_FLOW_ERROR_TYPE_ITEM,
1690                                 item, "Not supported by fdir filter");
1691                         return -rte_errno;
1692                 }
1693
1694                 if (item->spec) {
1695                         rule->b_spec = TRUE;
1696                         eth_spec = item->spec;
1697
1698                         /* Get the dst MAC. */
1699                         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1700                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1701                                         eth_spec->dst.addr_bytes[j];
1702                         }
1703                 }
1704
1705
1706                 if (item->mask) {
1707
1708                         rule->b_mask = TRUE;
1709                         eth_mask = item->mask;
1710
1711                         /* Ether type should be masked. */
1712                         if (eth_mask->type ||
1713                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1714                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1715                                 rte_flow_error_set(error, EINVAL,
1716                                         RTE_FLOW_ERROR_TYPE_ITEM,
1717                                         item, "Not supported by fdir filter");
1718                                 return -rte_errno;
1719                         }
1720
1721                         /* A meaningful Ethernet mask means MAC VLAN mode. */
1722                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1723
1724                         /**
1725                          * The src MAC address must be masked out, while the
1726                          * dst MAC address mask must be all ones.
1727                          */
1728                         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1729                                 if (eth_mask->src.addr_bytes[j] ||
1730                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1731                                         memset(rule, 0,
1732                                         sizeof(struct ixgbe_fdir_rule));
1733                                         rte_flow_error_set(error, EINVAL,
1734                                         RTE_FLOW_ERROR_TYPE_ITEM,
1735                                         item, "Not supported by fdir filter");
1736                                         return -rte_errno;
1737                                 }
1738                         }
1739
1740                         /* With no VLAN item, the VLAN TCI is treated as fully masked. */
1741                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1742                 }
1743                 /** If both spec and mask are NULL,
1744                  * it means we don't care about ETH.
1745                  * Do nothing.
1746                  */
1747
1748                 /**
1749                  * Check if the next not void item is vlan or ipv4.
1750                  * IPv6 is not supported.
1751                  */
1752                 item = next_no_fuzzy_pattern(pattern, item);
1753                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1754                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1755                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1756                                 rte_flow_error_set(error, EINVAL,
1757                                         RTE_FLOW_ERROR_TYPE_ITEM,
1758                                         item, "Not supported by fdir filter");
1759                                 return -rte_errno;
1760                         }
1761                 } else {
1762                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1763                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1764                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765                                 rte_flow_error_set(error, EINVAL,
1766                                         RTE_FLOW_ERROR_TYPE_ITEM,
1767                                         item, "Not supported by fdir filter");
1768                                 return -rte_errno;
1769                         }
1770                 }
1771         }
1772
1773         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1774                 if (!(item->spec && item->mask)) {
1775                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1776                         rte_flow_error_set(error, EINVAL,
1777                                 RTE_FLOW_ERROR_TYPE_ITEM,
1778                                 item, "Not supported by fdir filter");
1779                         return -rte_errno;
1780                 }
1781
1782                 /* Ranges (item->last) are not supported */
1783                 if (item->last) {
1784                         rte_flow_error_set(error, EINVAL,
1785                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1786                                 item, "Not supported last point for range");
1787                         return -rte_errno;
1788                 }
1789
1790                 vlan_spec = item->spec;
1791                 vlan_mask = item->mask;
1792
1793                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1794
1795                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1796                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1797                 /* More than one VLAN tag is not supported. */
1798
1799                 /* Next not void item must be END */
1800                 item = next_no_fuzzy_pattern(pattern, item);
1801                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1802                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1803                         rte_flow_error_set(error, EINVAL,
1804                                 RTE_FLOW_ERROR_TYPE_ITEM,
1805                                 item, "Not supported by fdir filter");
1806                         return -rte_errno;
1807                 }
1808         }
1809
1810         /* Get the IPV4 info. */
1811         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1812                 /**
1813                  * Set the flow type even if there's no content
1814                  * as we must have a flow type.
1815                  */
1816                 rule->ixgbe_fdir.formatted.flow_type =
1817                         IXGBE_ATR_FLOW_TYPE_IPV4;
1818                 /* Ranges (item->last) are not supported */
1819                 if (item->last) {
1820                         rte_flow_error_set(error, EINVAL,
1821                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1822                                 item, "Not supported last point for range");
1823                         return -rte_errno;
1824                 }
1825                 /**
1826                  * Only care about src & dst addresses,
1827                  * others should be masked.
1828                  */
1829                 if (!item->mask) {
1830                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1831                         rte_flow_error_set(error, EINVAL,
1832                                 RTE_FLOW_ERROR_TYPE_ITEM,
1833                                 item, "Not supported by fdir filter");
1834                         return -rte_errno;
1835                 }
1836                 rule->b_mask = TRUE;
1837                 ipv4_mask = item->mask;
1838                 if (ipv4_mask->hdr.version_ihl ||
1839                     ipv4_mask->hdr.type_of_service ||
1840                     ipv4_mask->hdr.total_length ||
1841                     ipv4_mask->hdr.packet_id ||
1842                     ipv4_mask->hdr.fragment_offset ||
1843                     ipv4_mask->hdr.time_to_live ||
1844                     ipv4_mask->hdr.next_proto_id ||
1845                     ipv4_mask->hdr.hdr_checksum) {
1846                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847                         rte_flow_error_set(error, EINVAL,
1848                                 RTE_FLOW_ERROR_TYPE_ITEM,
1849                                 item, "Not supported by fdir filter");
1850                         return -rte_errno;
1851                 }
1852                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1853                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1854
1855                 if (item->spec) {
1856                         rule->b_spec = TRUE;
1857                         ipv4_spec = item->spec;
1858                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1859                                 ipv4_spec->hdr.dst_addr;
1860                         rule->ixgbe_fdir.formatted.src_ip[0] =
1861                                 ipv4_spec->hdr.src_addr;
1862                 }
1863
1864                 /**
1865                  * Check if the next not void item is
1866                  * TCP or UDP or SCTP or END.
1867                  */
1868                 item = next_no_fuzzy_pattern(pattern, item);
1869                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1870                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1871                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1872                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1873                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1874                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1875                         rte_flow_error_set(error, EINVAL,
1876                                 RTE_FLOW_ERROR_TYPE_ITEM,
1877                                 item, "Not supported by fdir filter");
1878                         return -rte_errno;
1879                 }
1880         }
1881
1882         /* Get the IPV6 info. */
1883         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1884                 /**
1885                  * Set the flow type even if there's no content
1886                  * as we must have a flow type.
1887                  */
1888                 rule->ixgbe_fdir.formatted.flow_type =
1889                         IXGBE_ATR_FLOW_TYPE_IPV6;
1890
1891                 /**
1892                  * 1. signature match is required
1893                  * 2. item->last (range) is not supported
1894                  * 3. the mask must not be NULL
1895                  */
1896                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1897                     item->last ||
1898                     !item->mask) {
1899                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1900                         rte_flow_error_set(error, EINVAL,
1901                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1902                                 item, "Not supported last point for range");
1903                         return -rte_errno;
1904                 }
1905
1906                 rule->b_mask = TRUE;
1907                 ipv6_mask = item->mask;
1908                 if (ipv6_mask->hdr.vtc_flow ||
1909                     ipv6_mask->hdr.payload_len ||
1910                     ipv6_mask->hdr.proto ||
1911                     ipv6_mask->hdr.hop_limits) {
1912                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1913                         rte_flow_error_set(error, EINVAL,
1914                                 RTE_FLOW_ERROR_TYPE_ITEM,
1915                                 item, "Not supported by fdir filter");
1916                         return -rte_errno;
1917                 }
1918
1919                 /* check src addr mask */
1920                 for (j = 0; j < 16; j++) {
1921                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1922                                 rule->mask.src_ipv6_mask |= 1 << j;
1923                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1924                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925                                 rte_flow_error_set(error, EINVAL,
1926                                         RTE_FLOW_ERROR_TYPE_ITEM,
1927                                         item, "Not supported by fdir filter");
1928                                 return -rte_errno;
1929                         }
1930                 }
1931
1932                 /* check dst addr mask */
1933                 for (j = 0; j < 16; j++) {
1934                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1935                                 rule->mask.dst_ipv6_mask |= 1 << j;
1936                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1937                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1938                                 rte_flow_error_set(error, EINVAL,
1939                                         RTE_FLOW_ERROR_TYPE_ITEM,
1940                                         item, "Not supported by fdir filter");
1941                                 return -rte_errno;
1942                         }
1943                 }
1944
1945                 if (item->spec) {
1946                         rule->b_spec = TRUE;
1947                         ipv6_spec = item->spec;
1948                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1949                                    ipv6_spec->hdr.src_addr, 16);
1950                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1951                                    ipv6_spec->hdr.dst_addr, 16);
1952                 }
1953
1954                 /**
1955                  * Check if the next not void item is
1956                  * TCP or UDP or SCTP or END.
1957                  */
1958                 item = next_no_fuzzy_pattern(pattern, item);
1959                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1960                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1961                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1962                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1963                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1964                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1965                         rte_flow_error_set(error, EINVAL,
1966                                 RTE_FLOW_ERROR_TYPE_ITEM,
1967                                 item, "Not supported by fdir filter");
1968                         return -rte_errno;
1969                 }
1970         }
1971
1972         /* Get the TCP info. */
1973         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1974                 /**
1975                  * Set the flow type even if there's no content
1976                  * as we must have a flow type.
1977                  */
1978                 rule->ixgbe_fdir.formatted.flow_type |=
1979                         IXGBE_ATR_L4TYPE_TCP;
1980                 /* Ranges (item->last) are not supported */
1981                 if (item->last) {
1982                         rte_flow_error_set(error, EINVAL,
1983                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1984                                 item, "Not supported last point for range");
1985                         return -rte_errno;
1986                 }
1987                 /**
1988                  * Only care about src & dst ports,
1989                  * others should be masked.
1990                  */
1991                 if (!item->mask) {
1992                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1993                         rte_flow_error_set(error, EINVAL,
1994                                 RTE_FLOW_ERROR_TYPE_ITEM,
1995                                 item, "Not supported by fdir filter");
1996                         return -rte_errno;
1997                 }
1998                 rule->b_mask = TRUE;
1999                 tcp_mask = item->mask;
2000                 if (tcp_mask->hdr.sent_seq ||
2001                     tcp_mask->hdr.recv_ack ||
2002                     tcp_mask->hdr.data_off ||
2003                     tcp_mask->hdr.tcp_flags ||
2004                     tcp_mask->hdr.rx_win ||
2005                     tcp_mask->hdr.cksum ||
2006                     tcp_mask->hdr.tcp_urp) {
2007                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2008                         rte_flow_error_set(error, EINVAL,
2009                                 RTE_FLOW_ERROR_TYPE_ITEM,
2010                                 item, "Not supported by fdir filter");
2011                         return -rte_errno;
2012                 }
2013                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
2014                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
2015
2016                 if (item->spec) {
2017                         rule->b_spec = TRUE;
2018                         tcp_spec = item->spec;
2019                         rule->ixgbe_fdir.formatted.src_port =
2020                                 tcp_spec->hdr.src_port;
2021                         rule->ixgbe_fdir.formatted.dst_port =
2022                                 tcp_spec->hdr.dst_port;
2023                 }
2024
2025                 item = next_no_fuzzy_pattern(pattern, item);
2026                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2027                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2028                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2029                         rte_flow_error_set(error, EINVAL,
2030                                 RTE_FLOW_ERROR_TYPE_ITEM,
2031                                 item, "Not supported by fdir filter");
2032                         return -rte_errno;
2033                 }
2034
2035         }
2036
2037         /* Get the UDP info */
2038         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2039                 /**
2040                  * Set the flow type even if there's no content
2041                  * as we must have a flow type.
2042                  */
2043                 rule->ixgbe_fdir.formatted.flow_type |=
2044                         IXGBE_ATR_L4TYPE_UDP;
2045                 /* Ranges (item->last) are not supported */
2046                 if (item->last) {
2047                         rte_flow_error_set(error, EINVAL,
2048                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2049                                 item, "Not supported last point for range");
2050                         return -rte_errno;
2051                 }
2052                 /**
2053                  * Only care about src & dst ports,
2054                  * others should be masked.
2055                  */
2056                 if (!item->mask) {
2057                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2058                         rte_flow_error_set(error, EINVAL,
2059                                 RTE_FLOW_ERROR_TYPE_ITEM,
2060                                 item, "Not supported by fdir filter");
2061                         return -rte_errno;
2062                 }
2063                 rule->b_mask = TRUE;
2064                 udp_mask = item->mask;
2065                 if (udp_mask->hdr.dgram_len ||
2066                     udp_mask->hdr.dgram_cksum) {
2067                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2068                         rte_flow_error_set(error, EINVAL,
2069                                 RTE_FLOW_ERROR_TYPE_ITEM,
2070                                 item, "Not supported by fdir filter");
2071                         return -rte_errno;
2072                 }
2073                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2074                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2075
2076                 if (item->spec) {
2077                         rule->b_spec = TRUE;
2078                         udp_spec = item->spec;
2079                         rule->ixgbe_fdir.formatted.src_port =
2080                                 udp_spec->hdr.src_port;
2081                         rule->ixgbe_fdir.formatted.dst_port =
2082                                 udp_spec->hdr.dst_port;
2083                 }
2084
2085                 item = next_no_fuzzy_pattern(pattern, item);
2086                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2087                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2088                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2089                         rte_flow_error_set(error, EINVAL,
2090                                 RTE_FLOW_ERROR_TYPE_ITEM,
2091                                 item, "Not supported by fdir filter");
2092                         return -rte_errno;
2093                 }
2094
2095         }
2096
2097         /* Get the SCTP info */
2098         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2099                 /**
2100                  * Set the flow type even if there's no content
2101                  * as we must have a flow type.
2102                  */
2103                 rule->ixgbe_fdir.formatted.flow_type |=
2104                         IXGBE_ATR_L4TYPE_SCTP;
2105                 /* Ranges (item->last) are not supported */
2106                 if (item->last) {
2107                         rte_flow_error_set(error, EINVAL,
2108                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2109                                 item, "Not supported last point for range");
2110                         return -rte_errno;
2111                 }
2112
2113                 /* Only the x550 family supports matching SCTP ports. */
2114                 if (hw->mac.type == ixgbe_mac_X550 ||
2115                     hw->mac.type == ixgbe_mac_X550EM_x ||
2116                     hw->mac.type == ixgbe_mac_X550EM_a) {
2117                         /**
2118                          * Only care about src & dst ports,
2119                          * others should be masked.
2120                          */
2121                         if (!item->mask) {
2122                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2123                                 rte_flow_error_set(error, EINVAL,
2124                                         RTE_FLOW_ERROR_TYPE_ITEM,
2125                                         item, "Not supported by fdir filter");
2126                                 return -rte_errno;
2127                         }
2128                         rule->b_mask = TRUE;
2129                         sctp_mask = item->mask;
2130                         if (sctp_mask->hdr.tag ||
2131                                 sctp_mask->hdr.cksum) {
2132                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2133                                 rte_flow_error_set(error, EINVAL,
2134                                         RTE_FLOW_ERROR_TYPE_ITEM,
2135                                         item, "Not supported by fdir filter");
2136                                 return -rte_errno;
2137                         }
2138                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2139                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2140
2141                         if (item->spec) {
2142                                 rule->b_spec = TRUE;
2143                                 sctp_spec = item->spec;
2144                                 rule->ixgbe_fdir.formatted.src_port =
2145                                         sctp_spec->hdr.src_port;
2146                                 rule->ixgbe_fdir.formatted.dst_port =
2147                                         sctp_spec->hdr.dst_port;
2148                         }
2149                 /* On other families, even the SCTP port match is not supported. */
2150                 } else {
2151                         sctp_mask = item->mask;
2152                         if (sctp_mask &&
2153                                 (sctp_mask->hdr.src_port ||
2154                                  sctp_mask->hdr.dst_port ||
2155                                  sctp_mask->hdr.tag ||
2156                                  sctp_mask->hdr.cksum)) {
2157                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2158                                 rte_flow_error_set(error, EINVAL,
2159                                         RTE_FLOW_ERROR_TYPE_ITEM,
2160                                         item, "Not supported by fdir filter");
2161                                 return -rte_errno;
2162                         }
2163                 }
2164
2165                 item = next_no_fuzzy_pattern(pattern, item);
2166                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2167                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2168                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2169                         rte_flow_error_set(error, EINVAL,
2170                                 RTE_FLOW_ERROR_TYPE_ITEM,
2171                                 item, "Not supported by fdir filter");
2172                         return -rte_errno;
2173                 }
2174         }
2175
2176         /* Get the flex byte info */
2177         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2178                 /* Ranges (item->last) are not supported */
2179                 if (item->last) {
2180                         rte_flow_error_set(error, EINVAL,
2181                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2182                                 item, "Not supported last point for range");
2183                         return -rte_errno;
2184                 }
2185                 /* neither mask nor spec should be NULL */
2186                 if (!item->mask || !item->spec) {
2187                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2188                         rte_flow_error_set(error, EINVAL,
2189                                 RTE_FLOW_ERROR_TYPE_ITEM,
2190                                 item, "Not supported by fdir filter");
2191                         return -rte_errno;
2192                 }
2193
2194                 raw_mask = item->mask;
2195
2196                 /* check mask */
2197                 if (raw_mask->relative != 0x1 ||
2198                     raw_mask->search != 0x1 ||
2199                     raw_mask->reserved != 0x0 ||
2200                     (uint32_t)raw_mask->offset != 0xffffffff ||
2201                     raw_mask->limit != 0xffff ||
2202                     raw_mask->length != 0xffff) {
2203                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204                         rte_flow_error_set(error, EINVAL,
2205                                 RTE_FLOW_ERROR_TYPE_ITEM,
2206                                 item, "Not supported by fdir filter");
2207                         return -rte_errno;
2208                 }
2209
2210                 raw_spec = item->spec;
2211
2212                 /* check spec */
2213                 if (raw_spec->relative != 0 ||
2214                     raw_spec->search != 0 ||
2215                     raw_spec->reserved != 0 ||
2216                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2217                     raw_spec->offset % 2 ||
2218                     raw_spec->limit != 0 ||
2219                     raw_spec->length != 2 ||
2220                     /* pattern can't be 0xffff */
2221                     (raw_spec->pattern[0] == 0xff &&
2222                      raw_spec->pattern[1] == 0xff)) {
2223                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2224                         rte_flow_error_set(error, EINVAL,
2225                                 RTE_FLOW_ERROR_TYPE_ITEM,
2226                                 item, "Not supported by fdir filter");
2227                         return -rte_errno;
2228                 }
2229
2230                 /* check pattern mask */
2231                 if (raw_mask->pattern[0] != 0xff ||
2232                     raw_mask->pattern[1] != 0xff) {
2233                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2234                         rte_flow_error_set(error, EINVAL,
2235                                 RTE_FLOW_ERROR_TYPE_ITEM,
2236                                 item, "Not supported by fdir filter");
2237                         return -rte_errno;
2238                 }
2239
2240                 rule->mask.flex_bytes_mask = 0xffff;
2241                 rule->ixgbe_fdir.formatted.flex_bytes =
2242                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2243                         raw_spec->pattern[0];
2244                 rule->flex_bytes_offset = raw_spec->offset;
2245         }
2246
2247         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2248                 /* check if the next not void item is END */
2249                 item = next_no_fuzzy_pattern(pattern, item);
2250                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2251                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2252                         rte_flow_error_set(error, EINVAL,
2253                                 RTE_FLOW_ERROR_TYPE_ITEM,
2254                                 item, "Not supported by fdir filter");
2255                         return -rte_errno;
2256                 }
2257         }
2258
2259         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2260 }
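
/*
 * Illustrative sketch, not part of the driver: the UDP example table in the
 * comment above expressed as rte_flow items and actions.  Addresses, ports,
 * the queue index and the function name are hypothetical; the masks must be
 * full, as required by the parser.
 */
static __rte_unused int
ixgbe_flow_example_fdir_udp(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
			.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
		},
	};
	static const struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = RTE_BE32(UINT32_MAX),
			.dst_addr = RTE_BE32(UINT32_MAX),
		},
	};
	static const struct rte_flow_item_udp udp_spec = {
		.hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
	};
	static const struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = RTE_BE16(0xFFFF), .dst_port = RTE_BE16(0xFFFF) },
	};
	static const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error) ?
		0 : -rte_errno;
}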
2261
2262 #define NVGRE_PROTOCOL 0x6558
2263
2264 /**
2265  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2266  * and fill in the flow director filter info along the way.
2267  * VxLAN PATTERN:
2268  * The first not void item must be ETH.
2269  * The second not void item must be IPV4 or IPV6.
2270  * The third not void item must be UDP and the fourth must be VXLAN.
2271  * The next not void item must be END.
2272  * NVGRE PATTERN:
2273  * The first not void item must be ETH.
2274  * The second not void item must be IPV4 or IPV6.
2275  * The third not void item must be NVGRE.
2276  * The next not void item must be END.
2277  * ACTION:
2278  * The first not void action should be QUEUE or DROP.
2279  * The second not void optional action should be MARK,
2280  * mark_id is a uint32_t number.
2281  * The next not void action should be END.
2282  * VxLAN pattern example:
2283  * ITEM         Spec                    Mask
2284  * ETH          NULL                    NULL
2285  * IPV4/IPV6    NULL                    NULL
2286  * UDP          NULL                    NULL
2287  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2288  * MAC VLAN     tci     0x2016          0xEFFF
2289  * END
2290  * NVGRE pattern example:
2291  * ITEM         Spec                    Mask
2292  * ETH          NULL                    NULL
2293  * IPV4/IPV6    NULL                    NULL
2294  * NVGRE        protocol        0x6558  0xFFFF
2295  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2296  * MAC VLAN     tci     0x2016          0xEFFF
2297  * END
2298  * Other members in mask and spec should be set to 0x00.
2299  * item->last should be NULL.
2300  */
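
/*
 * Illustrative sketch, not part of the driver: a VXLAN item as accepted by
 * the tunnel parser below.  The VNI value is hypothetical; the VNI must be
 * fully masked (or not masked at all) and the flags must stay masked out.
 */
static __rte_unused const struct rte_flow_item_vxlan ixgbe_example_vxlan_spec = {
	.vni = { 0x00, 0x32, 0x54 },
};
static __rte_unused const struct rte_flow_item_vxlan ixgbe_example_vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },
};
static __rte_unused const struct rte_flow_item ixgbe_example_vxlan_item = {
	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	.spec = &ixgbe_example_vxlan_spec,
	.mask = &ixgbe_example_vxlan_mask,
};
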
2301 static int
2302 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2303                                const struct rte_flow_item pattern[],
2304                                const struct rte_flow_action actions[],
2305                                struct ixgbe_fdir_rule *rule,
2306                                struct rte_flow_error *error)
2307 {
2308         const struct rte_flow_item *item;
2309         const struct rte_flow_item_vxlan *vxlan_spec;
2310         const struct rte_flow_item_vxlan *vxlan_mask;
2311         const struct rte_flow_item_nvgre *nvgre_spec;
2312         const struct rte_flow_item_nvgre *nvgre_mask;
2313         const struct rte_flow_item_eth *eth_spec;
2314         const struct rte_flow_item_eth *eth_mask;
2315         const struct rte_flow_item_vlan *vlan_spec;
2316         const struct rte_flow_item_vlan *vlan_mask;
2317         uint32_t j;
2318
2319         if (!pattern) {
2320                 rte_flow_error_set(error, EINVAL,
2321                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2322                                    NULL, "NULL pattern.");
2323                 return -rte_errno;
2324         }
2325
2326         if (!actions) {
2327                 rte_flow_error_set(error, EINVAL,
2328                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2329                                    NULL, "NULL action.");
2330                 return -rte_errno;
2331         }
2332
2333         if (!attr) {
2334                 rte_flow_error_set(error, EINVAL,
2335                                    RTE_FLOW_ERROR_TYPE_ATTR,
2336                                    NULL, "NULL attribute.");
2337                 return -rte_errno;
2338         }
2339
2340         /**
2341          * Some fields may not be provided. Set the spec to 0 and the mask to the
2342          * default value, so nothing needs to be done later for the missing fields.
2343          */
2344         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2345         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2346         rule->mask.vlan_tci_mask = 0;
2347
2348         /**
2349          * The first not void item should be
2350          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2351          */
2352         item = next_no_void_pattern(pattern, NULL);
2353         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2354             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2355             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2356             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2357             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2358             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2359                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2360                 rte_flow_error_set(error, EINVAL,
2361                         RTE_FLOW_ERROR_TYPE_ITEM,
2362                         item, "Not supported by fdir filter");
2363                 return -rte_errno;
2364         }
2365
2366         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2367
2368         /* Skip MAC. */
2369         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2370                 /* Only used to describe the protocol stack. */
2371                 if (item->spec || item->mask) {
2372                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2373                         rte_flow_error_set(error, EINVAL,
2374                                 RTE_FLOW_ERROR_TYPE_ITEM,
2375                                 item, "Not supported by fdir filter");
2376                         return -rte_errno;
2377                 }
2378                 /* Ranges (item->last) are not supported */
2379                 if (item->last) {
2380                         rte_flow_error_set(error, EINVAL,
2381                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2382                                 item, "Not supported last point for range");
2383                         return -rte_errno;
2384                 }
2385
2386                 /* Check if the next not void item is IPv4 or IPv6. */
2387                 item = next_no_void_pattern(pattern, item);
2388                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2389                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2390                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2391                         rte_flow_error_set(error, EINVAL,
2392                                 RTE_FLOW_ERROR_TYPE_ITEM,
2393                                 item, "Not supported by fdir filter");
2394                         return -rte_errno;
2395                 }
2396         }
2397
2398         /* Skip IP. */
2399         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2400             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2401                 /* Only used to describe the protocol stack. */
2402                 if (item->spec || item->mask) {
2403                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2404                         rte_flow_error_set(error, EINVAL,
2405                                 RTE_FLOW_ERROR_TYPE_ITEM,
2406                                 item, "Not supported by fdir filter");
2407                         return -rte_errno;
2408                 }
2409                 /* Ranges (item->last) are not supported */
2410                 if (item->last) {
2411                         rte_flow_error_set(error, EINVAL,
2412                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2413                                 item, "Not supported last point for range");
2414                         return -rte_errno;
2415                 }
2416
2417                 /* Check if the next not void item is UDP or NVGRE. */
2418                 item = next_no_void_pattern(pattern, item);
2419                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2420                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2421                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2422                         rte_flow_error_set(error, EINVAL,
2423                                 RTE_FLOW_ERROR_TYPE_ITEM,
2424                                 item, "Not supported by fdir filter");
2425                         return -rte_errno;
2426                 }
2427         }
2428
2429         /* Skip UDP. */
2430         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2431                 /* Only used to describe the protocol stack. */
2432                 if (item->spec || item->mask) {
2433                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2434                         rte_flow_error_set(error, EINVAL,
2435                                 RTE_FLOW_ERROR_TYPE_ITEM,
2436                                 item, "Not supported by fdir filter");
2437                         return -rte_errno;
2438                 }
2439                 /* Ranges (item->last) are not supported */
2440                 if (item->last) {
2441                         rte_flow_error_set(error, EINVAL,
2442                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2443                                 item, "Not supported last point for range");
2444                         return -rte_errno;
2445                 }
2446
2447                 /* Check if the next not void item is VxLAN. */
2448                 item = next_no_void_pattern(pattern, item);
2449                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2450                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2451                         rte_flow_error_set(error, EINVAL,
2452                                 RTE_FLOW_ERROR_TYPE_ITEM,
2453                                 item, "Not supported by fdir filter");
2454                         return -rte_errno;
2455                 }
2456         }
2457
2458         /* Get the VxLAN info */
2459         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2460                 rule->ixgbe_fdir.formatted.tunnel_type =
2461                                 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2462
2463                 /* Only care about VNI, others should be masked. */
2464                 if (!item->mask) {
2465                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2466                         rte_flow_error_set(error, EINVAL,
2467                                 RTE_FLOW_ERROR_TYPE_ITEM,
2468                                 item, "Not supported by fdir filter");
2469                         return -rte_errno;
2470                 }
2471                 /* Ranges (item->last) are not supported */
2472                 if (item->last) {
2473                         rte_flow_error_set(error, EINVAL,
2474                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2475                                 item, "Not supported last point for range");
2476                         return -rte_errno;
2477                 }
2478                 rule->b_mask = TRUE;
2479
2480                 /* Tunnel type is always meaningful. */
2481                 rule->mask.tunnel_type_mask = 1;
2482
2483                 vxlan_mask = item->mask;
2484                 if (vxlan_mask->flags) {
2485                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2486                         rte_flow_error_set(error, EINVAL,
2487                                 RTE_FLOW_ERROR_TYPE_ITEM,
2488                                 item, "Not supported by fdir filter");
2489                         return -rte_errno;
2490                 }
2491                 /* The VNI must be either fully masked or not masked at all. */
2492                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2493                         vxlan_mask->vni[2]) &&
2494                         ((vxlan_mask->vni[0] != 0xFF) ||
2495                         (vxlan_mask->vni[1] != 0xFF) ||
2496                                 (vxlan_mask->vni[2] != 0xFF))) {
2497                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2498                         rte_flow_error_set(error, EINVAL,
2499                                 RTE_FLOW_ERROR_TYPE_ITEM,
2500                                 item, "Not supported by fdir filter");
2501                         return -rte_errno;
2502                 }
2503
2504                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2505                         RTE_DIM(vxlan_mask->vni));
2506
2507                 if (item->spec) {
2508                         rule->b_spec = TRUE;
2509                         vxlan_spec = item->spec;
2510                         rte_memcpy(((uint8_t *)
2511                                 &rule->ixgbe_fdir.formatted.tni_vni),
2512                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2513                 }
2514         }
2515
2516         /* Get the NVGRE info */
2517         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2518                 rule->ixgbe_fdir.formatted.tunnel_type =
2519                                 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2520
2521                 /**
2522                  * Only the c/k/s/version flags, protocol and TNI can be
2523                  * matched; other fields must be masked out.
2524                  */
2525                 if (!item->mask) {
2526                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2527                         rte_flow_error_set(error, EINVAL,
2528                                 RTE_FLOW_ERROR_TYPE_ITEM,
2529                                 item, "Not supported by fdir filter");
2530                         return -rte_errno;
2531                 }
2532                 /* Ranges (the "last" field) are not supported. */
2533                 if (item->last) {
2534                         rte_flow_error_set(error, EINVAL,
2535                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2536                                 item, "Not supported last point for range");
2537                         return -rte_errno;
2538                 }
2539                 rule->b_mask = TRUE;
2540
2541                 /* Tunnel type is always meaningful. */
2542                 rule->mask.tunnel_type_mask = 1;
2543
2544                 nvgre_mask = item->mask;
2545                 if (nvgre_mask->flow_id) {
2546                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2547                         rte_flow_error_set(error, EINVAL,
2548                                 RTE_FLOW_ERROR_TYPE_ITEM,
2549                                 item, "Not supported by fdir filter");
2550                         return -rte_errno;
2551                 }
2552                 if (nvgre_mask->protocol &&
2553                     nvgre_mask->protocol != 0xFFFF) {
2554                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2555                         rte_flow_error_set(error, EINVAL,
2556                                 RTE_FLOW_ERROR_TYPE_ITEM,
2557                                 item, "Not supported by fdir filter");
2558                         return -rte_errno;
2559                 }
2560                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2561                     nvgre_mask->c_k_s_rsvd0_ver !=
2562                         rte_cpu_to_be_16(0xFFFF)) {
2563                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564                         rte_flow_error_set(error, EINVAL,
2565                                 RTE_FLOW_ERROR_TYPE_ITEM,
2566                                 item, "Not supported by fdir filter");
2567                         return -rte_errno;
2568                 }
2569                 /* The TNI must be either fully masked or not masked at all. */
2570                 if (nvgre_mask->tni[0] &&
2571                     ((nvgre_mask->tni[0] != 0xFF) ||
2572                     (nvgre_mask->tni[1] != 0xFF) ||
2573                     (nvgre_mask->tni[2] != 0xFF))) {
2574                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2575                         rte_flow_error_set(error, EINVAL,
2576                                 RTE_FLOW_ERROR_TYPE_ITEM,
2577                                 item, "Not supported by fdir filter");
2578                         return -rte_errno;
2579                 }
2580                 /* The TNI is a 24-bit field. */
2581                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2582                         RTE_DIM(nvgre_mask->tni));
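                /* The TNI occupies 3 bytes; shift it up by one byte within
                 * the 32-bit tunnel ID mask.
                 */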
2583                 rule->mask.tunnel_id_mask <<= 8;
2584
2585                 if (item->spec) {
2586                         rule->b_spec = TRUE;
2587                         nvgre_spec = item->spec;
2588                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2589                             rte_cpu_to_be_16(0x2000) &&
2590                                 nvgre_mask->c_k_s_rsvd0_ver) {
2591                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2592                                 rte_flow_error_set(error, EINVAL,
2593                                         RTE_FLOW_ERROR_TYPE_ITEM,
2594                                         item, "Not supported by fdir filter");
2595                                 return -rte_errno;
2596                         }
2597                         if (nvgre_mask->protocol &&
2598                             nvgre_spec->protocol !=
2599                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2600                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2601                                 rte_flow_error_set(error, EINVAL,
2602                                         RTE_FLOW_ERROR_TYPE_ITEM,
2603                                         item, "Not supported by fdir filter");
2604                                 return -rte_errno;
2605                         }
2606                         /* The TNI is a 24-bit field. */
2607                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2608                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2609                 }
2610         }
2611
2612         /* check if the next not void item is MAC */
2613         item = next_no_void_pattern(pattern, item);
2614         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2615                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2616                 rte_flow_error_set(error, EINVAL,
2617                         RTE_FLOW_ERROR_TYPE_ITEM,
2618                         item, "Not supported by fdir filter");
2619                 return -rte_errno;
2620         }
2621
2622         /**
2623          * Only the VLAN and destination MAC address can be matched;
2624          * other fields must be masked out.
2625          */
2626
2627         if (!item->mask) {
2628                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2629                 rte_flow_error_set(error, EINVAL,
2630                         RTE_FLOW_ERROR_TYPE_ITEM,
2631                         item, "Not supported by fdir filter");
2632                 return -rte_errno;
2633         }
2634         /* Ranges (the "last" field) are not supported. */
2635         if (item->last) {
2636                 rte_flow_error_set(error, EINVAL,
2637                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2638                         item, "Not supported last point for range");
2639                 return -rte_errno;
2640         }
2641         rule->b_mask = TRUE;
2642         eth_mask = item->mask;
2643
2644         /* The EtherType must be masked out (mask of zero). */
2645         if (eth_mask->type) {
2646                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2647                 rte_flow_error_set(error, EINVAL,
2648                         RTE_FLOW_ERROR_TYPE_ITEM,
2649                         item, "Not supported by fdir filter");
2650                 return -rte_errno;
2651         }
2652
2653         /* The source MAC address must be masked out (mask of zero). */
2654         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2655                 if (eth_mask->src.addr_bytes[j]) {
2656                         memset(rule, 0,
2657                                sizeof(struct ixgbe_fdir_rule));
2658                         rte_flow_error_set(error, EINVAL,
2659                                 RTE_FLOW_ERROR_TYPE_ITEM,
2660                                 item, "Not supported by fdir filter");
2661                         return -rte_errno;
2662                 }
2663         }
2664         rule->mask.mac_addr_byte_mask = 0;
2665         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2666                 /* The MAC mask is per byte: each byte must be 0x00 or 0xFF. */
2667                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2668                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2669                 } else if (eth_mask->dst.addr_bytes[j]) {
2670                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2671                         rte_flow_error_set(error, EINVAL,
2672                                 RTE_FLOW_ERROR_TYPE_ITEM,
2673                                 item, "Not supported by fdir filter");
2674                         return -rte_errno;
2675                 }
2676         }
2677
2678         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
2679         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2680
2681         if (item->spec) {
2682                 rule->b_spec = TRUE;
2683                 eth_spec = item->spec;
2684
2685                 /* Get the dst MAC. */
2686                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2687                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2688                                 eth_spec->dst.addr_bytes[j];
2689                 }
2690         }
2691
2692         /**
2693          * Check if the next not void item is vlan or ipv4.
2694          * IPv6 is not supported.
2695          */
2696         item = next_no_void_pattern(pattern, item);
2697         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2698                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2699                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2700                 rte_flow_error_set(error, EINVAL,
2701                         RTE_FLOW_ERROR_TYPE_ITEM,
2702                         item, "Not supported by fdir filter");
2703                 return -rte_errno;
2704         }
2705         /* Ranges (the "last" field) are not supported. */
2706         if (item->last) {
2707                 rte_flow_error_set(error, EINVAL,
2708                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2709                         item, "Not supported last point for range");
2710                 return -rte_errno;
2711         }
2712
2713         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2714                 if (!(item->spec && item->mask)) {
2715                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2716                         rte_flow_error_set(error, EINVAL,
2717                                 RTE_FLOW_ERROR_TYPE_ITEM,
2718                                 item, "Not supported by fdir filter");
2719                         return -rte_errno;
2720                 }
2721
2722                 vlan_spec = item->spec;
2723                 vlan_mask = item->mask;
2724
2725                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2726
2727                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2728                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2729                 /* More than one VLAN tag is not supported. */
2730
2731                 /* check if the next not void item is END */
2732                 item = next_no_void_pattern(pattern, item);
2733
2734                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2735                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2736                         rte_flow_error_set(error, EINVAL,
2737                                 RTE_FLOW_ERROR_TYPE_ITEM,
2738                                 item, "Not supported by fdir filter");
2739                         return -rte_errno;
2740                 }
2741         }
2742
2743         /**
2744          * If the VLAN TCI mask is 0, the VLAN is a don't-care;
2745          * nothing more needs to be done.
2746          */
2747
2748         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2749 }
2750
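/**
 * Parse a flow rule into a flow director filter.
 * Only 82599, X540 and X550 family MACs support fdir.  The normal
 * (non-tunnel) parser is tried first; if it rejects the pattern, the
 * tunnel (VxLAN/NVGRE) parser is tried.  The parsed rule is then checked
 * against the configured fdir mode and the number of Rx queues.
 */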
2751 static int
2752 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2753                         const struct rte_flow_attr *attr,
2754                         const struct rte_flow_item pattern[],
2755                         const struct rte_flow_action actions[],
2756                         struct ixgbe_fdir_rule *rule,
2757                         struct rte_flow_error *error)
2758 {
2759         int ret;
2760         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2761         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2762
2763         if (hw->mac.type != ixgbe_mac_82599EB &&
2764                 hw->mac.type != ixgbe_mac_X540 &&
2765                 hw->mac.type != ixgbe_mac_X550 &&
2766                 hw->mac.type != ixgbe_mac_X550EM_x &&
2767                 hw->mac.type != ixgbe_mac_X550EM_a)
2768                 return -ENOTSUP;
2769
2770         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2771                                         actions, rule, error);
2772
2773         if (!ret)
2774                 goto step_next;
2775
2776         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2777                                         actions, rule, error);
2778
2779         if (ret)
2780                 return ret;
2781
2782 step_next:
2783
2784         if (hw->mac.type == ixgbe_mac_82599EB &&
2785                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2786                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2787                 rule->ixgbe_fdir.formatted.dst_port != 0))
2788                 return -ENOTSUP;
2789
2790         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2791             fdir_mode != rule->mode)
2792                 return -ENOTSUP;
2793
2794         if (rule->queue >= dev->data->nb_rx_queues)
2795                 return -ENOTSUP;
2796
2797         return ret;
2798 }
2799
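/**
 * Parse a flow rule into an RSS configuration.
 * The action list must contain a single RSS action followed by END, and
 * the attributes must describe a plain ingress rule.
 */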
2800 static int
2801 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2802                         const struct rte_flow_attr *attr,
2803                         const struct rte_flow_action actions[],
2804                         struct ixgbe_rte_flow_rss_conf *rss_conf,
2805                         struct rte_flow_error *error)
2806 {
2807         const struct rte_flow_action *act;
2808         const struct rte_flow_action_rss *rss;
2809         uint16_t n;
2810
2811         /**
2812          * RSS only supports forwarding;
2813          * check that the first not void action is RSS.
2814          */
2815         act = next_no_void_action(actions, NULL);
2816         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2817                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2818                 rte_flow_error_set(error, EINVAL,
2819                         RTE_FLOW_ERROR_TYPE_ACTION,
2820                         act, "Not supported action.");
2821                 return -rte_errno;
2822         }
2823
2824         rss = (const struct rte_flow_action_rss *)act->conf;
2825
2826         if (!rss || !rss->queue_num) {
2827                 rte_flow_error_set(error, EINVAL,
2828                                 RTE_FLOW_ERROR_TYPE_ACTION,
2829                                 act,
2830                            "no valid queues");
2831                 return -rte_errno;
2832         }
2833
2834         for (n = 0; n < rss->queue_num; n++) {
2835                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2836                         rte_flow_error_set(error, EINVAL,
2837                                    RTE_FLOW_ERROR_TYPE_ACTION,
2838                                    act,
2839                                    "queue id > max number of queues");
2840                         return -rte_errno;
2841                 }
2842         }
2843
2844         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2845                 return rte_flow_error_set
2846                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2847                          "non-default RSS hash functions are not supported");
2848         if (rss->level)
2849                 return rte_flow_error_set
2850                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2851                          "a nonzero RSS encapsulation level is not supported");
2852         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2853                 return rte_flow_error_set
2854                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2855                          "RSS hash key must be exactly 40 bytes");
2856         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2857                 return rte_flow_error_set
2858                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2859                          "too many queues for RSS context");
2860         if (ixgbe_rss_conf_init(rss_conf, rss))
2861                 return rte_flow_error_set
2862                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2863                          "RSS context initialization failure");
2864
2865         /* Check that the next not void action is END. */
2866         act = next_no_void_action(actions, act);
2867         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2868                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2869                 rte_flow_error_set(error, EINVAL,
2870                         RTE_FLOW_ERROR_TYPE_ACTION,
2871                         act, "Not supported action.");
2872                 return -rte_errno;
2873         }
2874
2875         /* Parse the attributes. */
2876         /* Must be the ingress direction. */
2877         if (!attr->ingress) {
2878                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2879                 rte_flow_error_set(error, EINVAL,
2880                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2881                                    attr, "Only support ingress.");
2882                 return -rte_errno;
2883         }
2884
2885         /* not supported */
2886         if (attr->egress) {
2887                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2888                 rte_flow_error_set(error, EINVAL,
2889                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2890                                    attr, "Not support egress.");
2891                 return -rte_errno;
2892         }
2893
2894         /* not supported */
2895         if (attr->transfer) {
2896                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2897                 rte_flow_error_set(error, EINVAL,
2898                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2899                                    attr, "No support for transfer.");
2900                 return -rte_errno;
2901         }
2902
2903         if (attr->priority > 0xFFFF) {
2904                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2905                 rte_flow_error_set(error, EINVAL,
2906                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2907                                    attr, "Error priority.");
2908                 return -rte_errno;
2909         }
2910
2911         return 0;
2912 }
2913
2914 /* Remove the RSS filter from the hardware, if one is configured. */
2915 static void
2916 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2917 {
2918         struct ixgbe_filter_info *filter_info =
2919                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2920
2921         if (filter_info->rss_info.conf.queue_num)
2922                 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2923 }
2924
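/* Initialize all the filter tracking lists kept by this driver. */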
2925 void
2926 ixgbe_filterlist_init(void)
2927 {
2928         TAILQ_INIT(&filter_ntuple_list);
2929         TAILQ_INIT(&filter_ethertype_list);
2930         TAILQ_INIT(&filter_syn_list);
2931         TAILQ_INIT(&filter_fdir_list);
2932         TAILQ_INIT(&filter_l2_tunnel_list);
2933         TAILQ_INIT(&filter_rss_list);
2934         TAILQ_INIT(&ixgbe_flow_list);
2935 }
2936
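/* Release every element on the filter tracking lists and the flow memory list. */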
2937 void
2938 ixgbe_filterlist_flush(void)
2939 {
2940         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2941         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2942         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2943         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2944         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2945         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2946         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2947
2948         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2949                 TAILQ_REMOVE(&filter_ntuple_list,
2950                                  ntuple_filter_ptr,
2951                                  entries);
2952                 rte_free(ntuple_filter_ptr);
2953         }
2954
2955         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2956                 TAILQ_REMOVE(&filter_ethertype_list,
2957                                  ethertype_filter_ptr,
2958                                  entries);
2959                 rte_free(ethertype_filter_ptr);
2960         }
2961
2962         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2963                 TAILQ_REMOVE(&filter_syn_list,
2964                                  syn_filter_ptr,
2965                                  entries);
2966                 rte_free(syn_filter_ptr);
2967         }
2968
2969         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2970                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2971                                  l2_tn_filter_ptr,
2972                                  entries);
2973                 rte_free(l2_tn_filter_ptr);
2974         }
2975
2976         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2977                 TAILQ_REMOVE(&filter_fdir_list,
2978                                  fdir_rule_ptr,
2979                                  entries);
2980                 rte_free(fdir_rule_ptr);
2981         }
2982
2983         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2984                 TAILQ_REMOVE(&filter_rss_list,
2985                                  rss_filter_ptr,
2986                                  entries);
2987                 rte_free(rss_filter_ptr);
2988         }
2989
2990         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2991                 TAILQ_REMOVE(&ixgbe_flow_list,
2992                                  ixgbe_flow_mem_ptr,
2993                                  entries);
2994                 rte_free(ixgbe_flow_mem_ptr->flow);
2995                 rte_free(ixgbe_flow_mem_ptr);
2996         }
2997 }
2998
2999 /**
3000  * Create a flow rule.
3001  * Theoretically one rule can match more than one filter type.
3002  * We let it use the first filter type it matches,
3003  * so the order in which the parsers are tried matters.
3004  */
3005 static struct rte_flow *
3006 ixgbe_flow_create(struct rte_eth_dev *dev,
3007                   const struct rte_flow_attr *attr,
3008                   const struct rte_flow_item pattern[],
3009                   const struct rte_flow_action actions[],
3010                   struct rte_flow_error *error)
3011 {
3012         int ret;
3013         struct rte_eth_ntuple_filter ntuple_filter;
3014         struct rte_eth_ethertype_filter ethertype_filter;
3015         struct rte_eth_syn_filter syn_filter;
3016         struct ixgbe_fdir_rule fdir_rule;
3017         struct ixgbe_l2_tunnel_conf l2_tn_filter;
3018         struct ixgbe_hw_fdir_info *fdir_info =
3019                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3020         struct ixgbe_rte_flow_rss_conf rss_conf;
3021         struct rte_flow *flow = NULL;
3022         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3023         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3024         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3025         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3026         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3027         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3028         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
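        /* Set when this call installs the global fdir mask, so it can be
         * rolled back if programming the filter fails.
         */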
3029         uint8_t first_mask = FALSE;
3030
3031         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3032         if (!flow) {
3033                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3034                 return NULL;
3035         }
3036         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3037                         sizeof(struct ixgbe_flow_mem), 0);
3038         if (!ixgbe_flow_mem_ptr) {
3039                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3040                 rte_free(flow);
3041                 return NULL;
3042         }
3043         ixgbe_flow_mem_ptr->flow = flow;
3044         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3045                                 ixgbe_flow_mem_ptr, entries);
3046
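        /* Try each parser in turn; the first one that accepts the rule
         * determines the filter type used for this flow.
         */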
3047         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3048         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3049                         actions, &ntuple_filter, error);
3050
3051 #ifdef RTE_LIB_SECURITY
3052         /* An ESP flow is not really a flow; stop here. */
3053         if (ntuple_filter.proto == IPPROTO_ESP)
3054                 return flow;
3055 #endif
3056
3057         if (!ret) {
3058                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3059                 if (!ret) {
3060                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3061                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3062                         if (!ntuple_filter_ptr) {
3063                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3064                                 goto out;
3065                         }
3066                         rte_memcpy(&ntuple_filter_ptr->filter_info,
3067                                 &ntuple_filter,
3068                                 sizeof(struct rte_eth_ntuple_filter));
3069                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
3070                                 ntuple_filter_ptr, entries);
3071                         flow->rule = ntuple_filter_ptr;
3072                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3073                         return flow;
3074                 }
3075                 goto out;
3076         }
3077
3078         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3079         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3080                                 actions, &ethertype_filter, error);
3081         if (!ret) {
3082                 ret = ixgbe_add_del_ethertype_filter(dev,
3083                                 &ethertype_filter, TRUE);
3084                 if (!ret) {
3085                         ethertype_filter_ptr = rte_zmalloc(
3086                                 "ixgbe_ethertype_filter",
3087                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3088                         if (!ethertype_filter_ptr) {
3089                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3090                                 goto out;
3091                         }
3092                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3093                                 &ethertype_filter,
3094                                 sizeof(struct rte_eth_ethertype_filter));
3095                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
3096                                 ethertype_filter_ptr, entries);
3097                         flow->rule = ethertype_filter_ptr;
3098                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3099                         return flow;
3100                 }
3101                 goto out;
3102         }
3103
3104         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3105         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3106                                 actions, &syn_filter, error);
3107         if (!ret) {
3108                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3109                 if (!ret) {
3110                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3111                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3112                         if (!syn_filter_ptr) {
3113                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3114                                 goto out;
3115                         }
3116                         rte_memcpy(&syn_filter_ptr->filter_info,
3117                                 &syn_filter,
3118                                 sizeof(struct rte_eth_syn_filter));
3119                         TAILQ_INSERT_TAIL(&filter_syn_list,
3120                                 syn_filter_ptr,
3121                                 entries);
3122                         flow->rule = syn_filter_ptr;
3123                         flow->filter_type = RTE_ETH_FILTER_SYN;
3124                         return flow;
3125                 }
3126                 goto out;
3127         }
3128
3129         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3130         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3131                                 actions, &fdir_rule, error);
3132         if (!ret) {
3133                 /* The global fdir mask cannot be deleted once programmed. */
3134                 if (fdir_rule.b_mask) {
3135                         if (!fdir_info->mask_added) {
3136                                 /* It's the first time the mask is set. */
3137                                 rte_memcpy(&fdir_info->mask,
3138                                         &fdir_rule.mask,
3139                                         sizeof(struct ixgbe_hw_fdir_mask));
3140
3141                                 if (fdir_rule.mask.flex_bytes_mask) {
3142                                         ret = ixgbe_fdir_set_flexbytes_offset(dev,
3143                                                 fdir_rule.flex_bytes_offset);
3144                                         if (ret)
3145                                                 goto out;
3146                                 }
3147                                 ret = ixgbe_fdir_set_input_mask(dev);
3148                                 if (ret)
3149                                         goto out;
3150
3151                                 fdir_info->mask_added = TRUE;
3152                                 first_mask = TRUE;
3153                         } else {
3154                                 /**
3155                                  * Only one global mask is supported;
3156                                  * every rule must use the same mask.
3157                                  */
3158                                 ret = memcmp(&fdir_info->mask,
3159                                         &fdir_rule.mask,
3160                                         sizeof(struct ixgbe_hw_fdir_mask));
3161                                 if (ret)
3162                                         goto out;
3163
3164                                 if (fdir_rule.mask.flex_bytes_mask &&
3165                                     fdir_info->flex_bytes_offset !=
3166                                     fdir_rule.flex_bytes_offset)
3167                                         goto out;
3168                         }
3169                 }
3170
3171                 if (fdir_rule.b_spec) {
3172                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3173                                         FALSE, FALSE);
3174                         if (!ret) {
3175                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3176                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
3177                                 if (!fdir_rule_ptr) {
3178                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3179                                         goto out;
3180                                 }
3181                                 rte_memcpy(&fdir_rule_ptr->filter_info,
3182                                         &fdir_rule,
3183                                         sizeof(struct ixgbe_fdir_rule));
3184                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
3185                                         fdir_rule_ptr, entries);
3186                                 flow->rule = fdir_rule_ptr;
3187                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
3188
3189                                 return flow;
3190                         }
3191
3192                         if (ret) {
3193                                 /**
3194                                  * Clear the mask_added flag if programming
3195                                  * the filter failed.
3196                                  */
3197                                 if (first_mask)
3198                                         fdir_info->mask_added = FALSE;
3199                                 goto out;
3200                         }
3201                 }
3202
3203                 goto out;
3204         }
3205
3206         memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
3207         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3208                                         actions, &l2_tn_filter, error);
3209         if (!ret) {
3210                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3211                 if (!ret) {
3212                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3213                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3214                         if (!l2_tn_filter_ptr) {
3215                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3216                                 goto out;
3217                         }
3218                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3219                                 &l2_tn_filter,
3220                                 sizeof(struct ixgbe_l2_tunnel_conf));
3221                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3222                                 l2_tn_filter_ptr, entries);
3223                         flow->rule = l2_tn_filter_ptr;
3224                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3225                         return flow;
3226                 }
3227         }
3228
3229         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3230         ret = ixgbe_parse_rss_filter(dev, attr,
3231                                         actions, &rss_conf, error);
3232         if (!ret) {
3233                 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3234                 if (!ret) {
3235                         rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3236                                 sizeof(struct ixgbe_rss_conf_ele), 0);
3237                         if (!rss_filter_ptr) {
3238                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3239                                 goto out;
3240                         }
3241                         ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3242                                             &rss_conf.conf);
3243                         TAILQ_INSERT_TAIL(&filter_rss_list,
3244                                 rss_filter_ptr, entries);
3245                         flow->rule = rss_filter_ptr;
3246                         flow->filter_type = RTE_ETH_FILTER_HASH;
3247                         return flow;
3248                 }
3249         }
3250
3251 out:
3252         TAILQ_REMOVE(&ixgbe_flow_list,
3253                 ixgbe_flow_mem_ptr, entries);
3254         rte_flow_error_set(error, -ret,
3255                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3256                            "Failed to create flow.");
3257         rte_free(ixgbe_flow_mem_ptr);
3258         rte_free(flow);
3259         return NULL;
3260 }
3261
3262 /**
3263  * Check if the flow rule is supported by ixgbe.
3264  * It only checks the format; it does not guarantee that the rule can be
3265  * programmed into the HW, because there may not be enough room for it.
3266  */
3267 static int
3268 ixgbe_flow_validate(struct rte_eth_dev *dev,
3269                 const struct rte_flow_attr *attr,
3270                 const struct rte_flow_item pattern[],
3271                 const struct rte_flow_action actions[],
3272                 struct rte_flow_error *error)
3273 {
3274         struct rte_eth_ntuple_filter ntuple_filter;
3275         struct rte_eth_ethertype_filter ethertype_filter;
3276         struct rte_eth_syn_filter syn_filter;
3277         struct ixgbe_l2_tunnel_conf l2_tn_filter;
3278         struct ixgbe_fdir_rule fdir_rule;
3279         struct ixgbe_rte_flow_rss_conf rss_conf;
3280         int ret;
3281
3282         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3283         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3284                                 actions, &ntuple_filter, error);
3285         if (!ret)
3286                 return 0;
3287
3288         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3289         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3290                                 actions, &ethertype_filter, error);
3291         if (!ret)
3292                 return 0;
3293
3294         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3295         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3296                                 actions, &syn_filter, error);
3297         if (!ret)
3298                 return 0;
3299
3300         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3301         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3302                                 actions, &fdir_rule, error);
3303         if (!ret)
3304                 return 0;
3305
3306         memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
3307         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3308                                 actions, &l2_tn_filter, error);
3309         if (!ret)
3310                 return 0;
3311
3312         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3313         ret = ixgbe_parse_rss_filter(dev, attr,
3314                                         actions, &rss_conf, error);
3315
3316         return ret;
3317 }
3318
3319 /* Destroy a flow rule on ixgbe. */
3320 static int
3321 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3322                 struct rte_flow *flow,
3323                 struct rte_flow_error *error)
3324 {
3325         int ret;
3326         struct rte_flow *pmd_flow = flow;
3327         enum rte_filter_type filter_type = pmd_flow->filter_type;
3328         struct rte_eth_ntuple_filter ntuple_filter;
3329         struct rte_eth_ethertype_filter ethertype_filter;
3330         struct rte_eth_syn_filter syn_filter;
3331         struct ixgbe_fdir_rule fdir_rule;
3332         struct ixgbe_l2_tunnel_conf l2_tn_filter;
3333         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3334         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3335         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3336         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3337         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3338         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3339         struct ixgbe_hw_fdir_info *fdir_info =
3340                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3341         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3342
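        /* For each filter type: remove the rule from the hardware first,
         * then drop the tracking-list element only if that succeeded.
         */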
3343         switch (filter_type) {
3344         case RTE_ETH_FILTER_NTUPLE:
3345                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3346                                         pmd_flow->rule;
3347                 rte_memcpy(&ntuple_filter,
3348                         &ntuple_filter_ptr->filter_info,
3349                         sizeof(struct rte_eth_ntuple_filter));
3350                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3351                 if (!ret) {
3352                         TAILQ_REMOVE(&filter_ntuple_list,
3353                         ntuple_filter_ptr, entries);
3354                         rte_free(ntuple_filter_ptr);
3355                 }
3356                 break;
3357         case RTE_ETH_FILTER_ETHERTYPE:
3358                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3359                                         pmd_flow->rule;
3360                 rte_memcpy(&ethertype_filter,
3361                         &ethertype_filter_ptr->filter_info,
3362                         sizeof(struct rte_eth_ethertype_filter));
3363                 ret = ixgbe_add_del_ethertype_filter(dev,
3364                                 &ethertype_filter, FALSE);
3365                 if (!ret) {
3366                         TAILQ_REMOVE(&filter_ethertype_list,
3367                                 ethertype_filter_ptr, entries);
3368                         rte_free(ethertype_filter_ptr);
3369                 }
3370                 break;
3371         case RTE_ETH_FILTER_SYN:
3372                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3373                                 pmd_flow->rule;
3374                 rte_memcpy(&syn_filter,
3375                         &syn_filter_ptr->filter_info,
3376                         sizeof(struct rte_eth_syn_filter));
3377                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3378                 if (!ret) {
3379                         TAILQ_REMOVE(&filter_syn_list,
3380                                 syn_filter_ptr, entries);
3381                         rte_free(syn_filter_ptr);
3382                 }
3383                 break;
3384         case RTE_ETH_FILTER_FDIR:
3385                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3386                 rte_memcpy(&fdir_rule,
3387                         &fdir_rule_ptr->filter_info,
3388                         sizeof(struct ixgbe_fdir_rule));
3389                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3390                 if (!ret) {
3391                         TAILQ_REMOVE(&filter_fdir_list,
3392                                 fdir_rule_ptr, entries);
3393                         rte_free(fdir_rule_ptr);
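                        /* With no fdir rules left, allow a new global mask
                         * to be programmed by the next rule.
                         */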
3394                         if (TAILQ_EMPTY(&filter_fdir_list))
3395                                 fdir_info->mask_added = false;
3396                 }
3397                 break;
3398         case RTE_ETH_FILTER_L2_TUNNEL:
3399                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3400                                 pmd_flow->rule;
3401                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3402                         sizeof(struct ixgbe_l2_tunnel_conf));
3403                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3404                 if (!ret) {
3405                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3406                                 l2_tn_filter_ptr, entries);
3407                         rte_free(l2_tn_filter_ptr);
3408                 }
3409                 break;
3410         case RTE_ETH_FILTER_HASH:
3411                 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3412                                 pmd_flow->rule;
3413                 ret = ixgbe_config_rss_filter(dev,
3414                                         &rss_filter_ptr->filter_info, FALSE);
3415                 if (!ret) {
3416                         TAILQ_REMOVE(&filter_rss_list,
3417                                 rss_filter_ptr, entries);
3418                         rte_free(rss_filter_ptr);
3419                 }
3420                 break;
3421         default:
3422                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3423                             filter_type);
3424                 ret = -EINVAL;
3425                 break;
3426         }
3427
3428         if (ret) {
3429                 rte_flow_error_set(error, EINVAL,
3430                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3431                                 NULL, "Failed to destroy flow");
3432                 return ret;
3433         }
3434
3435         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3436                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3437                         TAILQ_REMOVE(&ixgbe_flow_list,
3438                                 ixgbe_flow_mem_ptr, entries);
3439                         rte_free(ixgbe_flow_mem_ptr);
3440                         break;
3441                 }
3442         }
3443         rte_free(flow);
3444
3445         return ret;
3446 }
3447
3448 /* Destroy all flow rules associated with a port on ixgbe. */
3449 static int
3450 ixgbe_flow_flush(struct rte_eth_dev *dev,
3451                 struct rte_flow_error *error)
3452 {
3453         int ret = 0;
3454
3455         ixgbe_clear_all_ntuple_filter(dev);
3456         ixgbe_clear_all_ethertype_filter(dev);
3457         ixgbe_clear_syn_filter(dev);
3458
3459         ret = ixgbe_clear_all_fdir_filter(dev);
3460         if (ret < 0) {
3461                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3462                                         NULL, "Failed to flush rule");
3463                 return ret;
3464         }
3465
3466         ret = ixgbe_clear_all_l2_tn_filter(dev);
3467         if (ret < 0) {
3468                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3469                                         NULL, "Failed to flush rule");
3470                 return ret;
3471         }
3472
3473         ixgbe_clear_rss_filter(dev);
3474
3475         ixgbe_filterlist_flush();
3476
3477         return 0;
3478 }
3479
3480 const struct rte_flow_ops ixgbe_flow_ops = {
3481         .validate = ixgbe_flow_validate,
3482         .create = ixgbe_flow_create,
3483         .destroy = ixgbe_flow_destroy,
3484         .flush = ixgbe_flow_flush,
3485 };
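/*
 * Usage sketch (not part of the driver): roughly how an application reaches
 * the callbacks above through the generic rte_flow API.  The port id, queue
 * index and address below are illustrative assumptions only; which internal
 * parser accepts the rule is decided by the ordering in ixgbe_flow_create().
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_ipv4 ip_spec = {
 *              .hdr.dst_addr = RTE_BE32(0xc0a80001),   // 192.168.0.1
 *      };
 *      struct rte_flow_item_ipv4 ip_mask = {
 *              .hdr.dst_addr = RTE_BE32(0xffffffff),
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *f = NULL;
 *
 *      if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *              f = rte_flow_create(0, &attr, pattern, actions, &err);
 */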