dpdk.git: drivers/net/txgbe/txgbe_flow.c (commit effe00ff8bc6da3afb5721ac75ee8fe89ad9960d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <sys/queue.h>
6 #include <rte_bus_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10
11 #include "txgbe_ethdev.h"
12
13 #define TXGBE_MIN_N_TUPLE_PRIO 1
14 #define TXGBE_MAX_N_TUPLE_PRIO 7
15 #define TXGBE_MAX_FLX_SOURCE_OFF 62
16
17 /* ntuple filter list structure */
18 struct txgbe_ntuple_filter_ele {
19         TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
20         struct rte_eth_ntuple_filter filter_info;
21 };
22 /* ethertype filter list structure */
23 struct txgbe_ethertype_filter_ele {
24         TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
25         struct rte_eth_ethertype_filter filter_info;
26 };
27 /* syn filter list structure */
28 struct txgbe_eth_syn_filter_ele {
29         TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
30         struct rte_eth_syn_filter filter_info;
31 };
32 /* fdir filter list structure */
33 struct txgbe_fdir_rule_ele {
34         TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
35         struct txgbe_fdir_rule filter_info;
36 };
37 /* l2_tunnel filter list structure */
38 struct txgbe_eth_l2_tunnel_conf_ele {
39         TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
40         struct txgbe_l2_tunnel_conf filter_info;
41 };
42 /* rss filter list structure */
43 struct txgbe_rss_conf_ele {
44         TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
45         struct txgbe_rte_flow_rss_conf filter_info;
46 };
47 /* txgbe_flow memory list structure */
48 struct txgbe_flow_mem {
49         TAILQ_ENTRY(txgbe_flow_mem) entries;
50         struct rte_flow *flow;
51 };
52
53 TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
54 TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
55 TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
56 TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
57 TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
58 TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
59 TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
60
61 static struct txgbe_ntuple_filter_list filter_ntuple_list;
62 static struct txgbe_ethertype_filter_list filter_ethertype_list;
63 static struct txgbe_syn_filter_list filter_syn_list;
64 static struct txgbe_fdir_rule_filter_list filter_fdir_list;
65 static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
66 static struct txgbe_rss_filter_list filter_rss_list;
67 static struct txgbe_flow_mem_list txgbe_flow_list;
68
69 /**
70  * An endless loop cannot happen under the assumptions below:
71  * 1. there is at least one not void item (END)
72  * 2. cur is before END.
73  */
74 static inline
75 const struct rte_flow_item *next_no_void_pattern(
76                 const struct rte_flow_item pattern[],
77                 const struct rte_flow_item *cur)
78 {
79         const struct rte_flow_item *next =
80                 cur ? cur + 1 : &pattern[0];
81         while (1) {
82                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
83                         return next;
84                 next++;
85         }
86 }
87
88 static inline
89 const struct rte_flow_action *next_no_void_action(
90                 const struct rte_flow_action actions[],
91                 const struct rte_flow_action *cur)
92 {
93         const struct rte_flow_action *next =
94                 cur ? cur + 1 : &actions[0];
95         while (1) {
96                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
97                         return next;
98                 next++;
99         }
100 }
101
102 /**
103  * Please be aware of an assumption shared by all the parsers:
104  * rte_flow_item uses big endian, while rte_flow_attr and
105  * rte_flow_action use CPU order.
106  * Because the pattern describes packets, the packets normally
107  * use network order.
108  */
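/**
 * For illustration only (the port and queue values here are assumptions,
 * not taken from this driver): an application matching TCP destination
 * port 80 and steering the traffic to queue 5 fills the pattern field in
 * network order but the action field in CPU order:
 *
 *   tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);   item spec: big endian
 *   queue_conf.index = 5;                           action conf: CPU order
 */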
109
110 /**
111  * Parse the rule to see if it is an n-tuple rule.
112  * Also extract the n-tuple filter info along the way.
113  * pattern:
114  * The first not void item can be ETH or IPV4.
115  * The second not void item must be IPV4 if the first one is ETH.
116  * The third not void item must be UDP, TCP or SCTP.
117  * The next not void item must be END.
118  * action:
119  * The first not void action should be QUEUE.
120  * The next not void action should be END.
121  * pattern example:
122  * ITEM         Spec                    Mask
123  * ETH          NULL                    NULL
124  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
125  *              dst_addr 192.167.3.50   0xFFFFFFFF
126  *              next_proto_id   17      0xFF
127  * UDP/TCP/     src_port        80      0xFFFF
128  * SCTP         dst_port        80      0xFFFF
129  * END
130  * other members in mask and spec should be set to 0x00.
131  * item->last should be NULL.
132  *
133  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
134  *
135  */
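/**
 * Illustrative application-side sketch of the example above (the queue
 * index 3 is an assumption; the addresses, ports and protocol follow the
 * example table):
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *           .dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *           .next_proto_id = IPPROTO_UDP } };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *           .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *           .next_proto_id = UINT8_MAX } };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(80),
 *           .dst_port = rte_cpu_to_be_16(80) } };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *           .src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */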
136 static int
137 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
138                          const struct rte_flow_item pattern[],
139                          const struct rte_flow_action actions[],
140                          struct rte_eth_ntuple_filter *filter,
141                          struct rte_flow_error *error)
142 {
143         const struct rte_flow_item *item;
144         const struct rte_flow_action *act;
145         const struct rte_flow_item_ipv4 *ipv4_spec;
146         const struct rte_flow_item_ipv4 *ipv4_mask;
147         const struct rte_flow_item_tcp *tcp_spec;
148         const struct rte_flow_item_tcp *tcp_mask;
149         const struct rte_flow_item_udp *udp_spec;
150         const struct rte_flow_item_udp *udp_mask;
151         const struct rte_flow_item_sctp *sctp_spec;
152         const struct rte_flow_item_sctp *sctp_mask;
153         const struct rte_flow_item_eth *eth_spec;
154         const struct rte_flow_item_eth *eth_mask;
155         const struct rte_flow_item_vlan *vlan_spec;
156         const struct rte_flow_item_vlan *vlan_mask;
157         struct rte_flow_item_eth eth_null;
158         struct rte_flow_item_vlan vlan_null;
159
160         if (!pattern) {
161                 rte_flow_error_set(error,
162                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
163                         NULL, "NULL pattern.");
164                 return -rte_errno;
165         }
166
167         if (!actions) {
168                 rte_flow_error_set(error, EINVAL,
169                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
170                                    NULL, "NULL action.");
171                 return -rte_errno;
172         }
173         if (!attr) {
174                 rte_flow_error_set(error, EINVAL,
175                                    RTE_FLOW_ERROR_TYPE_ATTR,
176                                    NULL, "NULL attribute.");
177                 return -rte_errno;
178         }
179
180         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
181         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
182
183 #ifdef RTE_LIB_SECURITY
184         /**
185          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
186          */
187         act = next_no_void_action(actions, NULL);
188         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
189                 const void *conf = act->conf;
190                 /* check if the next not void item is END */
191                 act = next_no_void_action(actions, act);
192                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
193                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
194                         rte_flow_error_set(error, EINVAL,
195                                 RTE_FLOW_ERROR_TYPE_ACTION,
196                                 act, "Not supported action.");
197                         return -rte_errno;
198                 }
199
200                 /* get the IP pattern*/
201                 item = next_no_void_pattern(pattern, NULL);
202                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
203                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
204                         if (item->last ||
205                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
206                                 rte_flow_error_set(error, EINVAL,
207                                         RTE_FLOW_ERROR_TYPE_ITEM,
208                                         item, "IP pattern missing.");
209                                 return -rte_errno;
210                         }
211                         item = next_no_void_pattern(pattern, item);
212                 }
213
214                 filter->proto = IPPROTO_ESP;
215                 return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
216                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
217         }
218 #endif
219
220         /* the first not void item can be MAC or IPv4 */
221         item = next_no_void_pattern(pattern, NULL);
222
223         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
224             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
225                 rte_flow_error_set(error, EINVAL,
226                         RTE_FLOW_ERROR_TYPE_ITEM,
227                         item, "Not supported by ntuple filter");
228                 return -rte_errno;
229         }
230         /* Skip Ethernet */
231         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
232                 eth_spec = item->spec;
233                 eth_mask = item->mask;
234                 /*Not supported last point for range*/
235                 if (item->last) {
236                         rte_flow_error_set(error,
237                           EINVAL,
238                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239                           item, "Not supported last point for range");
240                         return -rte_errno;
241                 }
242                 /* if the first item is MAC, the content should be NULL */
243                 if ((item->spec && memcmp(eth_spec, &eth_null,
244                                           sizeof(struct rte_flow_item_eth))) ||
245                     (item->mask && memcmp(eth_mask, &eth_null,
246                                           sizeof(struct rte_flow_item_eth)))) {
247                         rte_flow_error_set(error, EINVAL,
248                                 RTE_FLOW_ERROR_TYPE_ITEM,
249                                 item, "Not supported by ntuple filter");
250                         return -rte_errno;
251                 }
252                 /* check if the next not void item is IPv4 or Vlan */
253                 item = next_no_void_pattern(pattern, item);
254                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
255                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
256                         rte_flow_error_set(error,
257                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
258                                 item, "Not supported by ntuple filter");
259                         return -rte_errno;
260                 }
261         }
262
263         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
264                 vlan_spec = item->spec;
265                 vlan_mask = item->mask;
266                 /*Not supported last point for range*/
267                 if (item->last) {
268                         rte_flow_error_set(error,
269                                 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
270                                 item, "Not supported last point for range");
271                         return -rte_errno;
272                 }
273                 /* the content should be NULL */
274                 if ((item->spec && memcmp(vlan_spec, &vlan_null,
275                                           sizeof(struct rte_flow_item_vlan))) ||
276                     (item->mask && memcmp(vlan_mask, &vlan_null,
277                                           sizeof(struct rte_flow_item_vlan)))) {
278                         rte_flow_error_set(error, EINVAL,
279                                 RTE_FLOW_ERROR_TYPE_ITEM,
280                                 item, "Not supported by ntuple filter");
281                         return -rte_errno;
282                 }
283                 /* check if the next not void item is IPv4 */
284                 item = next_no_void_pattern(pattern, item);
285                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
286                         rte_flow_error_set(error,
287                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
288                           item, "Not supported by ntuple filter");
289                         return -rte_errno;
290                 }
291         }
292
293         if (item->mask) {
294                 /* get the IPv4 info */
295                 if (!item->spec || !item->mask) {
296                         rte_flow_error_set(error, EINVAL,
297                                 RTE_FLOW_ERROR_TYPE_ITEM,
298                                 item, "Invalid ntuple mask");
299                         return -rte_errno;
300                 }
301                 /*Not supported last point for range*/
302                 if (item->last) {
303                         rte_flow_error_set(error, EINVAL,
304                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
305                                 item, "Not supported last point for range");
306                         return -rte_errno;
307                 }
308
309                 ipv4_mask = item->mask;
310                 /**
311                  * Only support src & dst addresses, protocol,
312                  * others should be masked.
313                  */
314                 if (ipv4_mask->hdr.version_ihl ||
315                     ipv4_mask->hdr.type_of_service ||
316                     ipv4_mask->hdr.total_length ||
317                     ipv4_mask->hdr.packet_id ||
318                     ipv4_mask->hdr.fragment_offset ||
319                     ipv4_mask->hdr.time_to_live ||
320                     ipv4_mask->hdr.hdr_checksum) {
321                         rte_flow_error_set(error,
322                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
323                                 item, "Not supported by ntuple filter");
324                         return -rte_errno;
325                 }
326                 if ((ipv4_mask->hdr.src_addr != 0 &&
327                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
328                         (ipv4_mask->hdr.dst_addr != 0 &&
329                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
330                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
331                         ipv4_mask->hdr.next_proto_id != 0)) {
332                         rte_flow_error_set(error,
333                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
334                                 item, "Not supported by ntuple filter");
335                         return -rte_errno;
336                 }
337
338                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
339                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
340                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
341
342                 ipv4_spec = item->spec;
343                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
344                 filter->src_ip = ipv4_spec->hdr.src_addr;
345                 filter->proto  = ipv4_spec->hdr.next_proto_id;
346         }
347
348         /* check if the next not void item is TCP or UDP */
349         item = next_no_void_pattern(pattern, item);
350         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
351             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
352             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
353             item->type != RTE_FLOW_ITEM_TYPE_END) {
354                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
355                 rte_flow_error_set(error, EINVAL,
356                         RTE_FLOW_ERROR_TYPE_ITEM,
357                         item, "Not supported by ntuple filter");
358                 return -rte_errno;
359         }
360
361         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
362                 (!item->spec && !item->mask)) {
363                 goto action;
364         }
365
366         /* get the TCP/UDP/SCTP info */
367         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
368                 (!item->spec || !item->mask)) {
369                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
370                 rte_flow_error_set(error, EINVAL,
371                         RTE_FLOW_ERROR_TYPE_ITEM,
372                         item, "Invalid ntuple mask");
373                 return -rte_errno;
374         }
375
376         /*Not supported last point for range*/
377         if (item->last) {
378                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
379                 rte_flow_error_set(error, EINVAL,
380                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
381                         item, "Not supported last point for range");
382                 return -rte_errno;
383         }
384
385         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
386                 tcp_mask = item->mask;
387
388                 /**
389                  * Only support src & dst ports, tcp flags,
390                  * others should be masked.
391                  */
392                 if (tcp_mask->hdr.sent_seq ||
393                     tcp_mask->hdr.recv_ack ||
394                     tcp_mask->hdr.data_off ||
395                     tcp_mask->hdr.rx_win ||
396                     tcp_mask->hdr.cksum ||
397                     tcp_mask->hdr.tcp_urp) {
398                         memset(filter, 0,
399                                 sizeof(struct rte_eth_ntuple_filter));
400                         rte_flow_error_set(error, EINVAL,
401                                 RTE_FLOW_ERROR_TYPE_ITEM,
402                                 item, "Not supported by ntuple filter");
403                         return -rte_errno;
404                 }
405                 if ((tcp_mask->hdr.src_port != 0 &&
406                         tcp_mask->hdr.src_port != UINT16_MAX) ||
407                         (tcp_mask->hdr.dst_port != 0 &&
408                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
409                         rte_flow_error_set(error,
410                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
411                                 item, "Not supported by ntuple filter");
412                         return -rte_errno;
413                 }
414
415                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
416                 filter->src_port_mask  = tcp_mask->hdr.src_port;
417                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
418                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
419                 } else if (!tcp_mask->hdr.tcp_flags) {
420                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
421                 } else {
422                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
423                         rte_flow_error_set(error, EINVAL,
424                                 RTE_FLOW_ERROR_TYPE_ITEM,
425                                 item, "Not supported by ntuple filter");
426                         return -rte_errno;
427                 }
428
429                 tcp_spec = item->spec;
430                 filter->dst_port  = tcp_spec->hdr.dst_port;
431                 filter->src_port  = tcp_spec->hdr.src_port;
432                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
433         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
434                 udp_mask = item->mask;
435
436                 /**
437                  * Only support src & dst ports,
438                  * others should be masked.
439                  */
440                 if (udp_mask->hdr.dgram_len ||
441                     udp_mask->hdr.dgram_cksum) {
442                         memset(filter, 0,
443                                 sizeof(struct rte_eth_ntuple_filter));
444                         rte_flow_error_set(error, EINVAL,
445                                 RTE_FLOW_ERROR_TYPE_ITEM,
446                                 item, "Not supported by ntuple filter");
447                         return -rte_errno;
448                 }
449                 if ((udp_mask->hdr.src_port != 0 &&
450                         udp_mask->hdr.src_port != UINT16_MAX) ||
451                         (udp_mask->hdr.dst_port != 0 &&
452                         udp_mask->hdr.dst_port != UINT16_MAX)) {
453                         rte_flow_error_set(error,
454                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
455                                 item, "Not supported by ntuple filter");
456                         return -rte_errno;
457                 }
458
459                 filter->dst_port_mask = udp_mask->hdr.dst_port;
460                 filter->src_port_mask = udp_mask->hdr.src_port;
461
462                 udp_spec = item->spec;
463                 filter->dst_port = udp_spec->hdr.dst_port;
464                 filter->src_port = udp_spec->hdr.src_port;
465         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
466                 sctp_mask = item->mask;
467
468                 /**
469                  * Only support src & dst ports,
470                  * others should be masked.
471                  */
472                 if (sctp_mask->hdr.tag ||
473                     sctp_mask->hdr.cksum) {
474                         memset(filter, 0,
475                                 sizeof(struct rte_eth_ntuple_filter));
476                         rte_flow_error_set(error, EINVAL,
477                                 RTE_FLOW_ERROR_TYPE_ITEM,
478                                 item, "Not supported by ntuple filter");
479                         return -rte_errno;
480                 }
481
482                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
483                 filter->src_port_mask = sctp_mask->hdr.src_port;
484
485                 sctp_spec = item->spec;
486                 filter->dst_port = sctp_spec->hdr.dst_port;
487                 filter->src_port = sctp_spec->hdr.src_port;
488         } else {
489                 goto action;
490         }
491
492         /* check if the next not void item is END */
493         item = next_no_void_pattern(pattern, item);
494         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
495                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
496                 rte_flow_error_set(error, EINVAL,
497                         RTE_FLOW_ERROR_TYPE_ITEM,
498                         item, "Not supported by ntuple filter");
499                 return -rte_errno;
500         }
501
502 action:
503
504         /**
505          * n-tuple only supports forwarding,
506          * check if the first not void action is QUEUE.
507          */
508         act = next_no_void_action(actions, NULL);
509         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
510                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
511                 rte_flow_error_set(error, EINVAL,
512                         RTE_FLOW_ERROR_TYPE_ACTION,
513                         act, "Not supported action.");
514                 return -rte_errno;
515         }
516         filter->queue =
517                 ((const struct rte_flow_action_queue *)act->conf)->index;
518
519         /* check if the next not void item is END */
520         act = next_no_void_action(actions, act);
521         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
522                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
523                 rte_flow_error_set(error, EINVAL,
524                         RTE_FLOW_ERROR_TYPE_ACTION,
525                         act, "Not supported action.");
526                 return -rte_errno;
527         }
528
529         /* parse attr */
530         /* must be input direction */
531         if (!attr->ingress) {
532                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
533                 rte_flow_error_set(error, EINVAL,
534                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
535                                    attr, "Only support ingress.");
536                 return -rte_errno;
537         }
538
539         /* not supported */
540         if (attr->egress) {
541                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
542                 rte_flow_error_set(error, EINVAL,
543                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
544                                    attr, "Not support egress.");
545                 return -rte_errno;
546         }
547
548         /* not supported */
549         if (attr->transfer) {
550                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
551                 rte_flow_error_set(error, EINVAL,
552                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
553                                    attr, "No support for transfer.");
554                 return -rte_errno;
555         }
556
557         if (attr->priority > 0xFFFF) {
558                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
559                 rte_flow_error_set(error, EINVAL,
560                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
561                                    attr, "Error priority.");
562                 return -rte_errno;
563         }
564         filter->priority = (uint16_t)attr->priority;
565         if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
566                 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
567                 filter->priority = 1;
568
569         return 0;
570 }
571
572 /* a txgbe-specific wrapper, because the supported flags are specific to txgbe */
573 static int
574 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
575                           const struct rte_flow_attr *attr,
576                           const struct rte_flow_item pattern[],
577                           const struct rte_flow_action actions[],
578                           struct rte_eth_ntuple_filter *filter,
579                           struct rte_flow_error *error)
580 {
581         int ret;
582
583         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
584
585         if (ret)
586                 return ret;
587
588 #ifdef RTE_LIB_SECURITY
589         /* ESP flow not really a flow */
590         if (filter->proto == IPPROTO_ESP)
591                 return 0;
592 #endif
593
594         /* txgbe doesn't support tcp flags */
595         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
596                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
597                 rte_flow_error_set(error, EINVAL,
598                                    RTE_FLOW_ERROR_TYPE_ITEM,
599                                    NULL, "Not supported by ntuple filter");
600                 return -rte_errno;
601         }
602
603         /* txgbe only supports a limited range of priorities */
604         if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
605             filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
606                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
607                 rte_flow_error_set(error, EINVAL,
608                         RTE_FLOW_ERROR_TYPE_ITEM,
609                         NULL, "Priority not supported by ntuple filter");
610                 return -rte_errno;
611         }
612
613         if (filter->queue >= dev->data->nb_rx_queues) {
614                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
615                 rte_flow_error_set(error, EINVAL,
616                                    RTE_FLOW_ERROR_TYPE_ITEM,
617                                    NULL, "Not supported by ntuple filter");
618                 return -rte_errno;
619         }
620
621         /* fixed value for txgbe */
622         filter->flags = RTE_5TUPLE_FLAGS;
623         return 0;
624 }
625
626 /**
627  * Parse the rule to see if it is an ethertype rule.
628  * Also extract the ethertype filter info along the way.
629  * pattern:
630  * The first not void item can be ETH.
631  * The next not void item must be END.
632  * action:
633  * The first not void action should be QUEUE or DROP.
634  * The next not void action should be END.
635  * pattern example:
636  * ITEM         Spec                    Mask
637  * ETH          type    0x0807          0xFFFF
638  * END
639  * other members in mask and spec should be set to 0x00.
640  * item->last should be NULL.
641  */
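/**
 * Illustrative application-side sketch of the example above (the queue
 * index 2 is an assumption):
 *
 *   struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0807) };
 *   struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */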
642 static int
643 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
644                             const struct rte_flow_item *pattern,
645                             const struct rte_flow_action *actions,
646                             struct rte_eth_ethertype_filter *filter,
647                             struct rte_flow_error *error)
648 {
649         const struct rte_flow_item *item;
650         const struct rte_flow_action *act;
651         const struct rte_flow_item_eth *eth_spec;
652         const struct rte_flow_item_eth *eth_mask;
653         const struct rte_flow_action_queue *act_q;
654
655         if (!pattern) {
656                 rte_flow_error_set(error, EINVAL,
657                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
658                                 NULL, "NULL pattern.");
659                 return -rte_errno;
660         }
661
662         if (!actions) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
665                                 NULL, "NULL action.");
666                 return -rte_errno;
667         }
668
669         if (!attr) {
670                 rte_flow_error_set(error, EINVAL,
671                                    RTE_FLOW_ERROR_TYPE_ATTR,
672                                    NULL, "NULL attribute.");
673                 return -rte_errno;
674         }
675
676         item = next_no_void_pattern(pattern, NULL);
677         /* The first non-void item should be MAC. */
678         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
679                 rte_flow_error_set(error, EINVAL,
680                         RTE_FLOW_ERROR_TYPE_ITEM,
681                         item, "Not supported by ethertype filter");
682                 return -rte_errno;
683         }
684
685         /*Not supported last point for range*/
686         if (item->last) {
687                 rte_flow_error_set(error, EINVAL,
688                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
689                         item, "Not supported last point for range");
690                 return -rte_errno;
691         }
692
693         /* Get the MAC info. */
694         if (!item->spec || !item->mask) {
695                 rte_flow_error_set(error, EINVAL,
696                                 RTE_FLOW_ERROR_TYPE_ITEM,
697                                 item, "Not supported by ethertype filter");
698                 return -rte_errno;
699         }
700
701         eth_spec = item->spec;
702         eth_mask = item->mask;
703
704         /* Mask bits of source MAC address must be full of 0.
705          * Mask bits of destination MAC address must be full
706          * of 1 or full of 0.
707          */
708         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
709             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
710              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
711                 rte_flow_error_set(error, EINVAL,
712                                 RTE_FLOW_ERROR_TYPE_ITEM,
713                                 item, "Invalid ether address mask");
714                 return -rte_errno;
715         }
716
717         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
718                 rte_flow_error_set(error, EINVAL,
719                                 RTE_FLOW_ERROR_TYPE_ITEM,
720                                 item, "Invalid ethertype mask");
721                 return -rte_errno;
722         }
723
724         /* If mask bits of destination MAC address
725          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
726          */
727         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
728                 filter->mac_addr = eth_spec->dst;
729                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
730         } else {
731                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
732         }
733         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
734
735         /* Check if the next non-void item is END. */
736         item = next_no_void_pattern(pattern, item);
737         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
738                 rte_flow_error_set(error, EINVAL,
739                                 RTE_FLOW_ERROR_TYPE_ITEM,
740                                 item, "Not supported by ethertype filter.");
741                 return -rte_errno;
742         }
743
744         /* Parse action */
745
746         act = next_no_void_action(actions, NULL);
747         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
748             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
749                 rte_flow_error_set(error, EINVAL,
750                                 RTE_FLOW_ERROR_TYPE_ACTION,
751                                 act, "Not supported action.");
752                 return -rte_errno;
753         }
754
755         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
756                 act_q = (const struct rte_flow_action_queue *)act->conf;
757                 filter->queue = act_q->index;
758         } else {
759                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
760         }
761
762         /* Check if the next non-void item is END */
763         act = next_no_void_action(actions, act);
764         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
765                 rte_flow_error_set(error, EINVAL,
766                                 RTE_FLOW_ERROR_TYPE_ACTION,
767                                 act, "Not supported action.");
768                 return -rte_errno;
769         }
770
771         /* Parse attr */
772         /* Must be input direction */
773         if (!attr->ingress) {
774                 rte_flow_error_set(error, EINVAL,
775                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
776                                 attr, "Only support ingress.");
777                 return -rte_errno;
778         }
779
780         /* Not supported */
781         if (attr->egress) {
782                 rte_flow_error_set(error, EINVAL,
783                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
784                                 attr, "Not support egress.");
785                 return -rte_errno;
786         }
787
788         /* Not supported */
789         if (attr->transfer) {
790                 rte_flow_error_set(error, EINVAL,
791                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
792                                 attr, "No support for transfer.");
793                 return -rte_errno;
794         }
795
796         /* Not supported */
797         if (attr->priority) {
798                 rte_flow_error_set(error, EINVAL,
799                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
800                                 attr, "Not support priority.");
801                 return -rte_errno;
802         }
803
804         /* Not supported */
805         if (attr->group) {
806                 rte_flow_error_set(error, EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
808                                 attr, "Not support group.");
809                 return -rte_errno;
810         }
811
812         return 0;
813 }
814
815 static int
816 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
817                              const struct rte_flow_attr *attr,
818                              const struct rte_flow_item pattern[],
819                              const struct rte_flow_action actions[],
820                              struct rte_eth_ethertype_filter *filter,
821                              struct rte_flow_error *error)
822 {
823         int ret;
824
825         ret = cons_parse_ethertype_filter(attr, pattern,
826                                         actions, filter, error);
827
828         if (ret)
829                 return ret;
830
831         if (filter->queue >= dev->data->nb_rx_queues) {
832                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
833                 rte_flow_error_set(error, EINVAL,
834                         RTE_FLOW_ERROR_TYPE_ITEM,
835                         NULL, "queue index much too big");
836                 return -rte_errno;
837         }
838
839         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
840                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
841                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
842                 rte_flow_error_set(error, EINVAL,
843                         RTE_FLOW_ERROR_TYPE_ITEM,
844                         NULL, "IPv4/IPv6 not supported by ethertype filter");
845                 return -rte_errno;
846         }
847
848         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
849                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
850                 rte_flow_error_set(error, EINVAL,
851                         RTE_FLOW_ERROR_TYPE_ITEM,
852                         NULL, "mac compare is unsupported");
853                 return -rte_errno;
854         }
855
856         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
857                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
858                 rte_flow_error_set(error, EINVAL,
859                         RTE_FLOW_ERROR_TYPE_ITEM,
860                         NULL, "drop option is unsupported");
861                 return -rte_errno;
862         }
863
864         return 0;
865 }
866
867 /**
868  * Parse the rule to see if it is a TCP SYN rule.
869  * Also extract the TCP SYN filter info along the way.
870  * pattern:
871  * The first not void item can be ETH, IPV4, IPV6 or TCP.
872  * If ETH is present, the next not void item must be IPV4 or IPV6.
873  * The next not void item must be TCP.
874  * The next not void item must be END.
875  * action:
876  * The first not void action should be QUEUE.
877  * The next not void action should be END.
878  * pattern example:
879  * ITEM         Spec                    Mask
880  * ETH          NULL                    NULL
881  * IPV4/IPV6    NULL                    NULL
882  * TCP          tcp_flags       0x02    0xFF
883  * END
884  * other members in mask and spec should be set to 0x00.
885  * item->last should be NULL.
886  */
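/**
 * Illustrative application-side sketch of the example above (the queue
 * index 0 is an assumption; the ETH and IPV4 items carry no spec/mask):
 *
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG } };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */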
887 static int
888 cons_parse_syn_filter(const struct rte_flow_attr *attr,
889                                 const struct rte_flow_item pattern[],
890                                 const struct rte_flow_action actions[],
891                                 struct rte_eth_syn_filter *filter,
892                                 struct rte_flow_error *error)
893 {
894         const struct rte_flow_item *item;
895         const struct rte_flow_action *act;
896         const struct rte_flow_item_tcp *tcp_spec;
897         const struct rte_flow_item_tcp *tcp_mask;
898         const struct rte_flow_action_queue *act_q;
899
900         if (!pattern) {
901                 rte_flow_error_set(error, EINVAL,
902                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
903                                 NULL, "NULL pattern.");
904                 return -rte_errno;
905         }
906
907         if (!actions) {
908                 rte_flow_error_set(error, EINVAL,
909                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
910                                 NULL, "NULL action.");
911                 return -rte_errno;
912         }
913
914         if (!attr) {
915                 rte_flow_error_set(error, EINVAL,
916                                    RTE_FLOW_ERROR_TYPE_ATTR,
917                                    NULL, "NULL attribute.");
918                 return -rte_errno;
919         }
920
921
922         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
923         item = next_no_void_pattern(pattern, NULL);
924         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
925             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
927             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
928                 rte_flow_error_set(error, EINVAL,
929                                 RTE_FLOW_ERROR_TYPE_ITEM,
930                                 item, "Not supported by syn filter");
931                 return -rte_errno;
932         }
933         /*Not supported last point for range*/
934         if (item->last) {
935                 rte_flow_error_set(error, EINVAL,
936                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
937                         item, "Not supported last point for range");
938                 return -rte_errno;
939         }
940
941         /* Skip Ethernet */
942         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
943                 /* if the item is MAC, the content should be NULL */
944                 if (item->spec || item->mask) {
945                         rte_flow_error_set(error, EINVAL,
946                                 RTE_FLOW_ERROR_TYPE_ITEM,
947                                 item, "Invalid SYN address mask");
948                         return -rte_errno;
949                 }
950
951                 /* check if the next not void item is IPv4 or IPv6 */
952                 item = next_no_void_pattern(pattern, item);
953                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
954                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
955                         rte_flow_error_set(error, EINVAL,
956                                 RTE_FLOW_ERROR_TYPE_ITEM,
957                                 item, "Not supported by syn filter");
958                         return -rte_errno;
959                 }
960         }
961
962         /* Skip IP */
963         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
964             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
965                 /* if the item is IP, the content should be NULL */
966                 if (item->spec || item->mask) {
967                         rte_flow_error_set(error, EINVAL,
968                                 RTE_FLOW_ERROR_TYPE_ITEM,
969                                 item, "Invalid SYN mask");
970                         return -rte_errno;
971                 }
972
973                 /* check if the next not void item is TCP */
974                 item = next_no_void_pattern(pattern, item);
975                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
976                         rte_flow_error_set(error, EINVAL,
977                                 RTE_FLOW_ERROR_TYPE_ITEM,
978                                 item, "Not supported by syn filter");
979                         return -rte_errno;
980                 }
981         }
982
983         /* Get the TCP info. Only support SYN. */
984         if (!item->spec || !item->mask) {
985                 rte_flow_error_set(error, EINVAL,
986                                 RTE_FLOW_ERROR_TYPE_ITEM,
987                                 item, "Invalid SYN mask");
988                 return -rte_errno;
989         }
990         /*Not supported last point for range*/
991         if (item->last) {
992                 rte_flow_error_set(error, EINVAL,
993                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
994                         item, "Not supported last point for range");
995                 return -rte_errno;
996         }
997
998         tcp_spec = item->spec;
999         tcp_mask = item->mask;
1000         if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1001             tcp_mask->hdr.src_port ||
1002             tcp_mask->hdr.dst_port ||
1003             tcp_mask->hdr.sent_seq ||
1004             tcp_mask->hdr.recv_ack ||
1005             tcp_mask->hdr.data_off ||
1006             tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1007             tcp_mask->hdr.rx_win ||
1008             tcp_mask->hdr.cksum ||
1009             tcp_mask->hdr.tcp_urp) {
1010                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1011                 rte_flow_error_set(error, EINVAL,
1012                                 RTE_FLOW_ERROR_TYPE_ITEM,
1013                                 item, "Not supported by syn filter");
1014                 return -rte_errno;
1015         }
1016
1017         /* check if the next not void item is END */
1018         item = next_no_void_pattern(pattern, item);
1019         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1020                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1021                 rte_flow_error_set(error, EINVAL,
1022                                 RTE_FLOW_ERROR_TYPE_ITEM,
1023                                 item, "Not supported by syn filter");
1024                 return -rte_errno;
1025         }
1026
1027         /* check if the first not void action is QUEUE. */
1028         act = next_no_void_action(actions, NULL);
1029         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1030                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1031                 rte_flow_error_set(error, EINVAL,
1032                                 RTE_FLOW_ERROR_TYPE_ACTION,
1033                                 act, "Not supported action.");
1034                 return -rte_errno;
1035         }
1036
1037         act_q = (const struct rte_flow_action_queue *)act->conf;
1038         filter->queue = act_q->index;
1039         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
1040                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1041                 rte_flow_error_set(error, EINVAL,
1042                                 RTE_FLOW_ERROR_TYPE_ACTION,
1043                                 act, "Not supported action.");
1044                 return -rte_errno;
1045         }
1046
1047         /* check if the next not void item is END */
1048         act = next_no_void_action(actions, act);
1049         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1050                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1051                 rte_flow_error_set(error, EINVAL,
1052                                 RTE_FLOW_ERROR_TYPE_ACTION,
1053                                 act, "Not supported action.");
1054                 return -rte_errno;
1055         }
1056
1057         /* parse attr */
1058         /* must be input direction */
1059         if (!attr->ingress) {
1060                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1061                 rte_flow_error_set(error, EINVAL,
1062                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1063                         attr, "Only support ingress.");
1064                 return -rte_errno;
1065         }
1066
1067         /* not supported */
1068         if (attr->egress) {
1069                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1070                 rte_flow_error_set(error, EINVAL,
1071                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1072                         attr, "Not support egress.");
1073                 return -rte_errno;
1074         }
1075
1076         /* not supported */
1077         if (attr->transfer) {
1078                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1079                 rte_flow_error_set(error, EINVAL,
1080                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1081                         attr, "No support for transfer.");
1082                 return -rte_errno;
1083         }
1084
1085         /* Support 2 priorities, the lowest or highest. */
1086         if (!attr->priority) {
1087                 filter->hig_pri = 0;
1088         } else if (attr->priority == (uint32_t)~0U) {
1089                 filter->hig_pri = 1;
1090         } else {
1091                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1092                 rte_flow_error_set(error, EINVAL,
1093                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1094                         attr, "Not support priority.");
1095                 return -rte_errno;
1096         }
1097
1098         return 0;
1099 }
1100
1101 static int
1102 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1103                              const struct rte_flow_attr *attr,
1104                              const struct rte_flow_item pattern[],
1105                              const struct rte_flow_action actions[],
1106                              struct rte_eth_syn_filter *filter,
1107                              struct rte_flow_error *error)
1108 {
1109         int ret;
1110
1111         ret = cons_parse_syn_filter(attr, pattern,
1112                                         actions, filter, error);
1113
1114         if (filter->queue >= dev->data->nb_rx_queues)
1115                 return -rte_errno;
1116
1117         if (ret)
1118                 return ret;
1119
1120         return 0;
1121 }
1122
1123 /**
1124  * Parse the rule to see if it is an L2 tunnel rule.
1125  * Also extract the L2 tunnel filter info along the way.
1126  * Only E-tag is supported now.
1127  * pattern:
1128  * The first not void item can be E_TAG.
1129  * The next not void item must be END.
1130  * action:
1131  * The first not void action should be VF or PF.
1132  * The next not void action should be END.
1133  * pattern example:
1134  * ITEM         Spec                    Mask
1135  * E_TAG        grp             0x1     0x3
1136  *              e_cid_base      0x309   0xFFF
1137  * END
1138  * other members in mask and spec should be set to 0x00.
1139  * item->last should be NULL.
1140  */
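/**
 * Illustrative application-side sketch of the example above (directing the
 * matched traffic to VF 1 is an assumption; 0x1309 packs grp 0x1 and
 * e_cid_base 0x309 into rsvd_grp_ecid_b):
 *
 *   struct rte_flow_item_e_tag etag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x1309) };
 *   struct rte_flow_item_e_tag etag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &etag_spec, .mask = &etag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */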
1141 static int
1142 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1143                         const struct rte_flow_attr *attr,
1144                         const struct rte_flow_item pattern[],
1145                         const struct rte_flow_action actions[],
1146                         struct txgbe_l2_tunnel_conf *filter,
1147                         struct rte_flow_error *error)
1148 {
1149         const struct rte_flow_item *item;
1150         const struct rte_flow_item_e_tag *e_tag_spec;
1151         const struct rte_flow_item_e_tag *e_tag_mask;
1152         const struct rte_flow_action *act;
1153         const struct rte_flow_action_vf *act_vf;
1154         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1155
1156         if (!pattern) {
1157                 rte_flow_error_set(error, EINVAL,
1158                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1159                         NULL, "NULL pattern.");
1160                 return -rte_errno;
1161         }
1162
1163         if (!actions) {
1164                 rte_flow_error_set(error, EINVAL,
1165                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1166                                    NULL, "NULL action.");
1167                 return -rte_errno;
1168         }
1169
1170         if (!attr) {
1171                 rte_flow_error_set(error, EINVAL,
1172                                    RTE_FLOW_ERROR_TYPE_ATTR,
1173                                    NULL, "NULL attribute.");
1174                 return -rte_errno;
1175         }
1176
1177         /* The first not void item should be e-tag. */
1178         item = next_no_void_pattern(pattern, NULL);
1179         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1180                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1181                 rte_flow_error_set(error, EINVAL,
1182                         RTE_FLOW_ERROR_TYPE_ITEM,
1183                         item, "Not supported by L2 tunnel filter");
1184                 return -rte_errno;
1185         }
1186
1187         if (!item->spec || !item->mask) {
1188                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1189                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1190                         item, "Not supported by L2 tunnel filter");
1191                 return -rte_errno;
1192         }
1193
1194         /*Not supported last point for range*/
1195         if (item->last) {
1196                 rte_flow_error_set(error, EINVAL,
1197                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1198                         item, "Not supported last point for range");
1199                 return -rte_errno;
1200         }
1201
1202         e_tag_spec = item->spec;
1203         e_tag_mask = item->mask;
1204
1205         /* Only care about GRP and E cid base. */
1206         if (e_tag_mask->epcp_edei_in_ecid_b ||
1207             e_tag_mask->in_ecid_e ||
1208             e_tag_mask->ecid_e ||
1209             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1210                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1211                 rte_flow_error_set(error, EINVAL,
1212                         RTE_FLOW_ERROR_TYPE_ITEM,
1213                         item, "Not supported by L2 tunnel filter");
1214                 return -rte_errno;
1215         }
1216
1217         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1218         /**
1219          * grp and e_cid_base are bit fields and only use 14 bits.
1220          * e-tag id is taken as little endian by HW.
1221          */
1222         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1223
1224         /* check if the next not void item is END */
1225         item = next_no_void_pattern(pattern, item);
1226         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1227                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1228                 rte_flow_error_set(error, EINVAL,
1229                         RTE_FLOW_ERROR_TYPE_ITEM,
1230                         item, "Not supported by L2 tunnel filter");
1231                 return -rte_errno;
1232         }
1233
1234         /* parse attr */
1235         /* must be input direction */
1236         if (!attr->ingress) {
1237                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1238                 rte_flow_error_set(error, EINVAL,
1239                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1240                         attr, "Only support ingress.");
1241                 return -rte_errno;
1242         }
1243
1244         /* not supported */
1245         if (attr->egress) {
1246                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1247                 rte_flow_error_set(error, EINVAL,
1248                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1249                         attr, "Not support egress.");
1250                 return -rte_errno;
1251         }
1252
1253         /* not supported */
1254         if (attr->transfer) {
1255                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1256                 rte_flow_error_set(error, EINVAL,
1257                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1258                         attr, "No support for transfer.");
1259                 return -rte_errno;
1260         }
1261
1262         /* not supported */
1263         if (attr->priority) {
1264                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1265                 rte_flow_error_set(error, EINVAL,
1266                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1267                         attr, "Not support priority.");
1268                 return -rte_errno;
1269         }
1270
1271         /* check if the first not void action is VF or PF. */
1272         act = next_no_void_action(actions, NULL);
1273         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1274                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1275                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1276                 rte_flow_error_set(error, EINVAL,
1277                         RTE_FLOW_ERROR_TYPE_ACTION,
1278                         act, "Not supported action.");
1279                 return -rte_errno;
1280         }
1281
1282         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1283                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1284                 filter->pool = act_vf->id;
1285         } else {
1286                 filter->pool = pci_dev->max_vfs;
1287         }
1288
1289         /* check if the next not void item is END */
1290         act = next_no_void_action(actions, act);
1291         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1292                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1293                 rte_flow_error_set(error, EINVAL,
1294                         RTE_FLOW_ERROR_TYPE_ACTION,
1295                         act, "Not supported action.");
1296                 return -rte_errno;
1297         }
1298
1299         return 0;
1300 }
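
/*
 * Illustrative, hypothetical sketch (kept out of the build): the shape of an
 * application-side rte_flow rule that the L2 tunnel parser above accepts --
 * an ingress E-tag match redirected to a VF.  "port_id", "err" and all field
 * values are assumptions for the example; the exact E-tag spec/mask
 * constraints are enforced earlier in cons_parse_l2_tn_filter().
 */
#if 0
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x1234), /* grp + e_cid_base */
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3fff), /* 14 valid bits */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = 1 }; /* pool = VF 1 */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);
#endif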
1301
1302 static int
1303 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1304                         const struct rte_flow_attr *attr,
1305                         const struct rte_flow_item pattern[],
1306                         const struct rte_flow_action actions[],
1307                         struct txgbe_l2_tunnel_conf *l2_tn_filter,
1308                         struct rte_flow_error *error)
1309 {
1310         int ret = 0;
1311         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1312         uint16_t vf_num;
1313
1314         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1315                                 actions, l2_tn_filter, error);
1316
1317         vf_num = pci_dev->max_vfs;
1318
1319         if (l2_tn_filter->pool > vf_num)
1320                 return -rte_errno;
1321
1322         return ret;
1323 }
1324
1325 /* Parse to get the attr and action info of flow director rule. */
1326 static int
1327 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1328                           const struct rte_flow_action actions[],
1329                           struct txgbe_fdir_rule *rule,
1330                           struct rte_flow_error *error)
1331 {
1332         const struct rte_flow_action *act;
1333         const struct rte_flow_action_queue *act_q;
1334         const struct rte_flow_action_mark *mark;
1335
1336         /* parse attr */
1337         /* must be input direction */
1338         if (!attr->ingress) {
1339                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1340                 rte_flow_error_set(error, EINVAL,
1341                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1342                         attr, "Only support ingress.");
1343                 return -rte_errno;
1344         }
1345
1346         /* not supported */
1347         if (attr->egress) {
1348                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1349                 rte_flow_error_set(error, EINVAL,
1350                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1351                         attr, "Not support egress.");
1352                 return -rte_errno;
1353         }
1354
1355         /* not supported */
1356         if (attr->transfer) {
1357                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1358                 rte_flow_error_set(error, EINVAL,
1359                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1360                         attr, "No support for transfer.");
1361                 return -rte_errno;
1362         }
1363
1364         /* not supported */
1365         if (attr->priority) {
1366                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1367                 rte_flow_error_set(error, EINVAL,
1368                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1369                         attr, "Not support priority.");
1370                 return -rte_errno;
1371         }
1372
1373         /* check if the first not void action is QUEUE or DROP. */
1374         act = next_no_void_action(actions, NULL);
1375         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1376             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1377                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1378                 rte_flow_error_set(error, EINVAL,
1379                         RTE_FLOW_ERROR_TYPE_ACTION,
1380                         act, "Not supported action.");
1381                 return -rte_errno;
1382         }
1383
1384         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1385                 act_q = (const struct rte_flow_action_queue *)act->conf;
1386                 rule->queue = act_q->index;
1387         } else { /* drop */
1388                 /* signature mode does not support drop action. */
1389                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1390                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1391                         rte_flow_error_set(error, EINVAL,
1392                                 RTE_FLOW_ERROR_TYPE_ACTION,
1393                                 act, "Not supported action.");
1394                         return -rte_errno;
1395                 }
1396                 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1397         }
1398
1399         /* check if the next not void item is MARK */
1400         act = next_no_void_action(actions, act);
1401         if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1402                 act->type != RTE_FLOW_ACTION_TYPE_END) {
1403                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1404                 rte_flow_error_set(error, EINVAL,
1405                         RTE_FLOW_ERROR_TYPE_ACTION,
1406                         act, "Not supported action.");
1407                 return -rte_errno;
1408         }
1409
1410         rule->soft_id = 0;
1411
1412         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1413                 mark = (const struct rte_flow_action_mark *)act->conf;
1414                 rule->soft_id = mark->id;
1415                 act = next_no_void_action(actions, act);
1416         }
1417
1418         /* check if the next not void item is END */
1419         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1420                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1421                 rte_flow_error_set(error, EINVAL,
1422                         RTE_FLOW_ERROR_TYPE_ACTION,
1423                         act, "Not supported action.");
1424                 return -rte_errno;
1425         }
1426
1427         return 0;
1428 }
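
/*
 * Illustrative, hypothetical sketch (kept out of the build): the action list
 * shape accepted by txgbe_parse_fdir_act_attr() above -- QUEUE (or DROP),
 * an optional MARK, then END.  The queue index and mark id are assumptions.
 */
#if 0
	struct rte_flow_action_queue queue = { .index = 3 };  /* -> rule->queue */
	struct rte_flow_action_mark mark = { .id = 0x1234 };  /* -> rule->soft_id */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
#endif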
1429
1430 /* Search the next not void pattern item and skip any FUZZY item. */
1431 static inline
1432 const struct rte_flow_item *next_no_fuzzy_pattern(
1433                 const struct rte_flow_item pattern[],
1434                 const struct rte_flow_item *cur)
1435 {
1436         const struct rte_flow_item *next =
1437                 next_no_void_pattern(pattern, cur);
1438         while (1) {
1439                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1440                         return next;
1441                 next = next_no_void_pattern(pattern, next);
1442         }
1443 }
1444
1445 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1446 {
1447         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1448         const struct rte_flow_item *item;
1449         uint32_t sh, lh, mh;
1450         int i = 0;
1451
1452         while (1) {
1453                 item = pattern + i;
1454                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1455                         break;
1456
1457                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1458                         spec = item->spec;
1459                         last = item->last;
1460                         mask = item->mask;
1461
1462                         if (!spec || !mask)
1463                                 return 0;
1464
1465                         sh = spec->thresh;
1466
1467                         if (!last)
1468                                 lh = sh;
1469                         else
1470                                 lh = last->thresh;
1471
1472                         mh = mask->thresh;
1473                         sh = sh & mh;
1474                         lh = lh & mh;
1475
1476                         if (!sh || sh > lh)
1477                                 return 0;
1478
1479                         return 1;
1480                 }
1481
1482                 i++;
1483         }
1484
1485         return 0;
1486 }
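
/*
 * Illustrative, hypothetical sketch (kept out of the build): a FUZZY item
 * that makes signature_match() above return 1 and so selects signature
 * mode.  After masking, spec->thresh must be non-zero and must not exceed
 * last->thresh (last defaults to spec when omitted).  Values are assumptions.
 */
#if 0
	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
	struct rte_flow_item fuzzy_item = {
		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
		.spec = &fuzzy_spec,
		.mask = &fuzzy_mask,
	};
#endif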
1487
1488 /**
1489  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1490  * and fill in the flow director filter info along the way.
1491  * UDP/TCP/SCTP PATTERN:
1492  * The first not void item can be ETH, IPV4, IPV6, TCP, UDP or SCTP.
1493  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1494  * The next not void item could be UDP or TCP or SCTP (optional)
1495  * The next not void item could be RAW (for flexbyte, optional)
1496  * The next not void item must be END.
1497  * A Fuzzy Match pattern can appear at any place before END.
1498  * Fuzzy Match is optional for IPV4 but is required for IPV6
1499  * MAC VLAN PATTERN:
1500  * The first not void item must be ETH.
1501  * The second not void item must be MAC VLAN.
1502  * The next not void item must be END.
1503  * ACTION:
1504  * The first not void action should be QUEUE or DROP.
1505  * The second not void optional action should be MARK,
1506  * mark_id is a uint32_t number.
1507  * The next not void action should be END.
1508  * UDP/TCP/SCTP pattern example:
1509  * ITEM         Spec                    Mask
1510  * ETH          NULL                    NULL
1511  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1512  *              dst_addr 192.167.3.50   0xFFFFFFFF
1513  * UDP/TCP/SCTP src_port        80      0xFFFF
1514  *              dst_port        80      0xFFFF
1515  * FLEX relative        0       0x1
1516  *              search          0       0x1
1517  *              reserved        0       0
1518  *              offset          12      0xFFFFFFFF
1519  *              limit           0       0xFFFF
1520  *              length          2       0xFFFF
1521  *              pattern[0]      0x86    0xFF
1522  *              pattern[1]      0xDD    0xFF
1523  * END
1524  * MAC VLAN pattern example:
1525  * ITEM         Spec                    Mask
1526  * ETH          dst_addr
1527  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1528  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1529  * MAC VLAN     tci     0x2016          0xEFFF
1530  * END
1531  * Other members in mask and spec should be set to 0x00.
1532  * Item->last should be NULL.
1533  */
1534 static int
1535 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1536                                const struct rte_flow_attr *attr,
1537                                const struct rte_flow_item pattern[],
1538                                const struct rte_flow_action actions[],
1539                                struct txgbe_fdir_rule *rule,
1540                                struct rte_flow_error *error)
1541 {
1542         const struct rte_flow_item *item;
1543         const struct rte_flow_item_eth *eth_mask;
1544         const struct rte_flow_item_ipv4 *ipv4_spec;
1545         const struct rte_flow_item_ipv4 *ipv4_mask;
1546         const struct rte_flow_item_ipv6 *ipv6_spec;
1547         const struct rte_flow_item_ipv6 *ipv6_mask;
1548         const struct rte_flow_item_tcp *tcp_spec;
1549         const struct rte_flow_item_tcp *tcp_mask;
1550         const struct rte_flow_item_udp *udp_spec;
1551         const struct rte_flow_item_udp *udp_mask;
1552         const struct rte_flow_item_sctp *sctp_spec;
1553         const struct rte_flow_item_sctp *sctp_mask;
1554         const struct rte_flow_item_raw *raw_mask;
1555         const struct rte_flow_item_raw *raw_spec;
1556         u32 ptype = 0;
1557         uint8_t j;
1558
1559         if (!pattern) {
1560                 rte_flow_error_set(error, EINVAL,
1561                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1562                         NULL, "NULL pattern.");
1563                 return -rte_errno;
1564         }
1565
1566         if (!actions) {
1567                 rte_flow_error_set(error, EINVAL,
1568                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1569                                    NULL, "NULL action.");
1570                 return -rte_errno;
1571         }
1572
1573         if (!attr) {
1574                 rte_flow_error_set(error, EINVAL,
1575                                    RTE_FLOW_ERROR_TYPE_ATTR,
1576                                    NULL, "NULL attribute.");
1577                 return -rte_errno;
1578         }
1579
1580         /**
1581          * Some fields may not be provided. Set spec to 0 and mask to default
1582          * value. So, we need not do anything for the not provided fields later.
1583          */
1584         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1585         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1586         rule->mask.vlan_tci_mask = 0;
1587         rule->mask.flex_bytes_mask = 0;
1588
1589         /**
1590          * The first not void item should be
1591          * ETH or IPv4 or IPv6 or TCP or UDP or SCTP.
1592          */
1593         item = next_no_fuzzy_pattern(pattern, NULL);
1594         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1595             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1596             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1597             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1598             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1599             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1600                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1601                 rte_flow_error_set(error, EINVAL,
1602                         RTE_FLOW_ERROR_TYPE_ITEM,
1603                         item, "Not supported by fdir filter");
1604                 return -rte_errno;
1605         }
1606
1607         if (signature_match(pattern))
1608                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1609         else
1610                 rule->mode = RTE_FDIR_MODE_PERFECT;
1611
1612         /*Not supported last point for range*/
1613         if (item->last) {
1614                 rte_flow_error_set(error, EINVAL,
1615                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1616                         item, "Not supported last point for range");
1617                 return -rte_errno;
1618         }
1619
1620         /* Get the MAC info. */
1621         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1622                 /**
1623                  * Only support vlan and dst MAC address,
1624                  * others should be masked.
1625                  */
1626                 if (item->spec && !item->mask) {
1627                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1628                         rte_flow_error_set(error, EINVAL,
1629                                 RTE_FLOW_ERROR_TYPE_ITEM,
1630                                 item, "Not supported by fdir filter");
1631                         return -rte_errno;
1632                 }
1633
1634                 if (item->mask) {
1635                         rule->b_mask = TRUE;
1636                         eth_mask = item->mask;
1637
1638                         /* Ether type should be masked. */
1639                         if (eth_mask->type ||
1640                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1641                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1642                                 rte_flow_error_set(error, EINVAL,
1643                                         RTE_FLOW_ERROR_TYPE_ITEM,
1644                                         item, "Not supported by fdir filter");
1645                                 return -rte_errno;
1646                         }
1647
1648                         /* If ethernet has meaning, it means MAC VLAN mode. */
1649                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1650
1651                         /**
1652                          * The src MAC address must be fully masked out (ignored),
1653                          * and the dst MAC address mask must be all 0xFF (no partial mask).
1654                          */
1655                         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1656                                 if (eth_mask->src.addr_bytes[j] ||
1657                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1658                                         memset(rule, 0,
1659                                         sizeof(struct txgbe_fdir_rule));
1660                                         rte_flow_error_set(error, EINVAL,
1661                                         RTE_FLOW_ERROR_TYPE_ITEM,
1662                                         item, "Not supported by fdir filter");
1663                                         return -rte_errno;
1664                                 }
1665                         }
1666
1667                         /* When no VLAN, considered as full mask. */
1668                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1669                 }
1670                 /** If both spec and mask are NULL,
1671                  * it means we don't care about ETH.
1672                  * Do nothing.
1673                  */
1674
1675                 /**
1676                  * Check if the next not void item is vlan or ipv4.
1677                  * IPv6 is not supported.
1678                  */
1679                 item = next_no_fuzzy_pattern(pattern, item);
1680                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1681                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1682                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1683                                 rte_flow_error_set(error, EINVAL,
1684                                         RTE_FLOW_ERROR_TYPE_ITEM,
1685                                         item, "Not supported by fdir filter");
1686                                 return -rte_errno;
1687                         }
1688                 } else {
1689                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1690                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1691                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1692                                 rte_flow_error_set(error, EINVAL,
1693                                         RTE_FLOW_ERROR_TYPE_ITEM,
1694                                         item, "Not supported by fdir filter");
1695                                 return -rte_errno;
1696                         }
1697                 }
1698         }
1699
1700         /* Get the IPV4 info. */
1701         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1702                 /**
1703                  * Set the flow type even if there's no content
1704                  * as we must have a flow type.
1705                  */
1706                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1707                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1708                 /*Not supported last point for range*/
1709                 if (item->last) {
1710                         rte_flow_error_set(error, EINVAL,
1711                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1712                                 item, "Not supported last point for range");
1713                         return -rte_errno;
1714                 }
1715                 /**
1716                  * Only care about src & dst addresses,
1717                  * others should be masked.
1718                  */
1719                 if (!item->mask) {
1720                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1721                         rte_flow_error_set(error, EINVAL,
1722                                 RTE_FLOW_ERROR_TYPE_ITEM,
1723                                 item, "Not supported by fdir filter");
1724                         return -rte_errno;
1725                 }
1726                 rule->b_mask = TRUE;
1727                 ipv4_mask = item->mask;
1728                 if (ipv4_mask->hdr.version_ihl ||
1729                     ipv4_mask->hdr.type_of_service ||
1730                     ipv4_mask->hdr.total_length ||
1731                     ipv4_mask->hdr.packet_id ||
1732                     ipv4_mask->hdr.fragment_offset ||
1733                     ipv4_mask->hdr.time_to_live ||
1734                     ipv4_mask->hdr.next_proto_id ||
1735                     ipv4_mask->hdr.hdr_checksum) {
1736                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1737                         rte_flow_error_set(error, EINVAL,
1738                                 RTE_FLOW_ERROR_TYPE_ITEM,
1739                                 item, "Not supported by fdir filter");
1740                         return -rte_errno;
1741                 }
1742                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1743                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1744
1745                 if (item->spec) {
1746                         rule->b_spec = TRUE;
1747                         ipv4_spec = item->spec;
1748                         rule->input.dst_ip[0] =
1749                                 ipv4_spec->hdr.dst_addr;
1750                         rule->input.src_ip[0] =
1751                                 ipv4_spec->hdr.src_addr;
1752                 }
1753
1754                 /**
1755                  * Check if the next not void item is
1756                  * TCP or UDP or SCTP or RAW or END.
1757                  */
1758                 item = next_no_fuzzy_pattern(pattern, item);
1759                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1760                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1761                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1762                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1763                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1764                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1765                         rte_flow_error_set(error, EINVAL,
1766                                 RTE_FLOW_ERROR_TYPE_ITEM,
1767                                 item, "Not supported by fdir filter");
1768                         return -rte_errno;
1769                 }
1770         }
1771
1772         /* Get the IPV6 info. */
1773         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1774                 /**
1775                  * Set the flow type even if there's no content
1776                  * as we must have a flow type.
1777                  */
1778                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1779                 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1780
1781                 /**
1782                  * 1. must be a signature match
1783                  * 2. 'last' is not supported
1784                  * 3. the mask must not be NULL
1785                  */
1786                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1787                     item->last ||
1788                     !item->mask) {
1789                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1790                         rte_flow_error_set(error, EINVAL,
1791                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1792                                 item, "Not supported last point for range");
1793                         return -rte_errno;
1794                 }
1795
1796                 rule->b_mask = TRUE;
1797                 ipv6_mask = item->mask;
1798                 if (ipv6_mask->hdr.vtc_flow ||
1799                     ipv6_mask->hdr.payload_len ||
1800                     ipv6_mask->hdr.proto ||
1801                     ipv6_mask->hdr.hop_limits) {
1802                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1803                         rte_flow_error_set(error, EINVAL,
1804                                 RTE_FLOW_ERROR_TYPE_ITEM,
1805                                 item, "Not supported by fdir filter");
1806                         return -rte_errno;
1807                 }
1808
1809                 /* check src addr mask */
1810                 for (j = 0; j < 16; j++) {
1811                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1812                                 rule->mask.src_ipv6_mask |= 1 << j;
1813                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1814                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1815                                 rte_flow_error_set(error, EINVAL,
1816                                         RTE_FLOW_ERROR_TYPE_ITEM,
1817                                         item, "Not supported by fdir filter");
1818                                 return -rte_errno;
1819                         }
1820                 }
1821
1822                 /* check dst addr mask */
1823                 for (j = 0; j < 16; j++) {
1824                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1825                                 rule->mask.dst_ipv6_mask |= 1 << j;
1826                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1827                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1828                                 rte_flow_error_set(error, EINVAL,
1829                                         RTE_FLOW_ERROR_TYPE_ITEM,
1830                                         item, "Not supported by fdir filter");
1831                                 return -rte_errno;
1832                         }
1833                 }
1834
1835                 if (item->spec) {
1836                         rule->b_spec = TRUE;
1837                         ipv6_spec = item->spec;
1838                         rte_memcpy(rule->input.src_ip,
1839                                    ipv6_spec->hdr.src_addr, 16);
1840                         rte_memcpy(rule->input.dst_ip,
1841                                    ipv6_spec->hdr.dst_addr, 16);
1842                 }
1843
1844                 /**
1845                  * Check if the next not void item is
1846                  * TCP or UDP or SCTP or RAW or END.
1847                  */
1848                 item = next_no_fuzzy_pattern(pattern, item);
1849                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1850                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1851                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1852                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1853                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1854                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1855                         rte_flow_error_set(error, EINVAL,
1856                                 RTE_FLOW_ERROR_TYPE_ITEM,
1857                                 item, "Not supported by fdir filter");
1858                         return -rte_errno;
1859                 }
1860         }
1861
1862         /* Get the TCP info. */
1863         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1864                 /**
1865                  * Set the flow type even if there's no content
1866                  * as we must have a flow type.
1867                  */
1868                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1869                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1870                 /*Not supported last point for range*/
1871                 if (item->last) {
1872                         rte_flow_error_set(error, EINVAL,
1873                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1874                                 item, "Not supported last point for range");
1875                         return -rte_errno;
1876                 }
1877                 /**
1878                  * Only care about src & dst ports,
1879                  * others should be masked.
1880                  */
1881                 if (!item->mask) {
1882                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1883                         rte_flow_error_set(error, EINVAL,
1884                                 RTE_FLOW_ERROR_TYPE_ITEM,
1885                                 item, "Not supported by fdir filter");
1886                         return -rte_errno;
1887                 }
1888                 rule->b_mask = TRUE;
1889                 tcp_mask = item->mask;
1890                 if (tcp_mask->hdr.sent_seq ||
1891                     tcp_mask->hdr.recv_ack ||
1892                     tcp_mask->hdr.data_off ||
1893                     tcp_mask->hdr.tcp_flags ||
1894                     tcp_mask->hdr.rx_win ||
1895                     tcp_mask->hdr.cksum ||
1896                     tcp_mask->hdr.tcp_urp) {
1897                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1898                         rte_flow_error_set(error, EINVAL,
1899                                 RTE_FLOW_ERROR_TYPE_ITEM,
1900                                 item, "Not supported by fdir filter");
1901                         return -rte_errno;
1902                 }
1903                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1904                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1905
1906                 if (item->spec) {
1907                         rule->b_spec = TRUE;
1908                         tcp_spec = item->spec;
1909                         rule->input.src_port =
1910                                 tcp_spec->hdr.src_port;
1911                         rule->input.dst_port =
1912                                 tcp_spec->hdr.dst_port;
1913                 }
1914
1915                 item = next_no_fuzzy_pattern(pattern, item);
1916                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1917                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1918                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1919                         rte_flow_error_set(error, EINVAL,
1920                                 RTE_FLOW_ERROR_TYPE_ITEM,
1921                                 item, "Not supported by fdir filter");
1922                         return -rte_errno;
1923                 }
1924         }
1925
1926         /* Get the UDP info */
1927         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1928                 /**
1929                  * Set the flow type even if there's no content
1930                  * as we must have a flow type.
1931                  */
1932                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1933                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1934                 /*Not supported last point for range*/
1935                 if (item->last) {
1936                         rte_flow_error_set(error, EINVAL,
1937                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1938                                 item, "Not supported last point for range");
1939                         return -rte_errno;
1940                 }
1941                 /**
1942                  * Only care about src & dst ports,
1943                  * others should be masked.
1944                  */
1945                 if (!item->mask) {
1946                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1947                         rte_flow_error_set(error, EINVAL,
1948                                 RTE_FLOW_ERROR_TYPE_ITEM,
1949                                 item, "Not supported by fdir filter");
1950                         return -rte_errno;
1951                 }
1952                 rule->b_mask = TRUE;
1953                 udp_mask = item->mask;
1954                 if (udp_mask->hdr.dgram_len ||
1955                     udp_mask->hdr.dgram_cksum) {
1956                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1957                         rte_flow_error_set(error, EINVAL,
1958                                 RTE_FLOW_ERROR_TYPE_ITEM,
1959                                 item, "Not supported by fdir filter");
1960                         return -rte_errno;
1961                 }
1962                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1963                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1964
1965                 if (item->spec) {
1966                         rule->b_spec = TRUE;
1967                         udp_spec = item->spec;
1968                         rule->input.src_port =
1969                                 udp_spec->hdr.src_port;
1970                         rule->input.dst_port =
1971                                 udp_spec->hdr.dst_port;
1972                 }
1973
1974                 item = next_no_fuzzy_pattern(pattern, item);
1975                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1976                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1977                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1978                         rte_flow_error_set(error, EINVAL,
1979                                 RTE_FLOW_ERROR_TYPE_ITEM,
1980                                 item, "Not supported by fdir filter");
1981                         return -rte_errno;
1982                 }
1983         }
1984
1985         /* Get the SCTP info */
1986         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1987                 /**
1988                  * Set the flow type even if there's no content
1989                  * as we must have a flow type.
1990                  */
1991                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1992                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1993                 /*Not supported last point for range*/
1994                 if (item->last) {
1995                         rte_flow_error_set(error, EINVAL,
1996                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1997                                 item, "Not supported last point for range");
1998                         return -rte_errno;
1999                 }
2000
2001                 /**
2002                  * Only care about src & dst ports,
2003                  * others should be masked.
2004                  */
2005                 if (!item->mask) {
2006                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2007                         rte_flow_error_set(error, EINVAL,
2008                                 RTE_FLOW_ERROR_TYPE_ITEM,
2009                                 item, "Not supported by fdir filter");
2010                         return -rte_errno;
2011                 }
2012                 rule->b_mask = TRUE;
2013                 sctp_mask = item->mask;
2014                 if (sctp_mask->hdr.tag ||
2015                         sctp_mask->hdr.cksum) {
2016                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2017                         rte_flow_error_set(error, EINVAL,
2018                                 RTE_FLOW_ERROR_TYPE_ITEM,
2019                                 item, "Not supported by fdir filter");
2020                         return -rte_errno;
2021                 }
2022                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2023                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2024
2025                 if (item->spec) {
2026                         rule->b_spec = TRUE;
2027                         sctp_spec = item->spec;
2028                         rule->input.src_port =
2029                                 sctp_spec->hdr.src_port;
2030                         rule->input.dst_port =
2031                                 sctp_spec->hdr.dst_port;
2032                 }
2033                 /* Other fields, even the SCTP ports, are not supported. */
2034                 sctp_mask = item->mask;
2035                 if (sctp_mask &&
2036                         (sctp_mask->hdr.src_port ||
2037                          sctp_mask->hdr.dst_port ||
2038                          sctp_mask->hdr.tag ||
2039                          sctp_mask->hdr.cksum)) {
2040                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2041                         rte_flow_error_set(error, EINVAL,
2042                                 RTE_FLOW_ERROR_TYPE_ITEM,
2043                                 item, "Not supported by fdir filter");
2044                         return -rte_errno;
2045                 }
2046
2047                 item = next_no_fuzzy_pattern(pattern, item);
2048                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2049                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2050                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2051                         rte_flow_error_set(error, EINVAL,
2052                                 RTE_FLOW_ERROR_TYPE_ITEM,
2053                                 item, "Not supported by fdir filter");
2054                         return -rte_errno;
2055                 }
2056         }
2057
2058         /* Get the flex byte info */
2059         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2060                 /* Not supported last point for range*/
2061                 if (item->last) {
2062                         rte_flow_error_set(error, EINVAL,
2063                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2064                                 item, "Not supported last point for range");
2065                         return -rte_errno;
2066                 }
2067                 /* Neither mask nor spec should be NULL. */
2068                 if (!item->mask || !item->spec) {
2069                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2070                         rte_flow_error_set(error, EINVAL,
2071                                 RTE_FLOW_ERROR_TYPE_ITEM,
2072                                 item, "Not supported by fdir filter");
2073                         return -rte_errno;
2074                 }
2075
2076                 raw_mask = item->mask;
2077
2078                 /* check mask */
2079                 if (raw_mask->relative != 0x1 ||
2080                     raw_mask->search != 0x1 ||
2081                     raw_mask->reserved != 0x0 ||
2082                     (uint32_t)raw_mask->offset != 0xffffffff ||
2083                     raw_mask->limit != 0xffff ||
2084                     raw_mask->length != 0xffff) {
2085                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2086                         rte_flow_error_set(error, EINVAL,
2087                                 RTE_FLOW_ERROR_TYPE_ITEM,
2088                                 item, "Not supported by fdir filter");
2089                         return -rte_errno;
2090                 }
2091
2092                 raw_spec = item->spec;
2093
2094                 /* check spec */
2095                 if (raw_spec->relative != 0 ||
2096                     raw_spec->search != 0 ||
2097                     raw_spec->reserved != 0 ||
2098                     raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2099                     raw_spec->offset % 2 ||
2100                     raw_spec->limit != 0 ||
2101                     raw_spec->length != 2 ||
2102                     /* pattern can't be 0xffff */
2103                     (raw_spec->pattern[0] == 0xff &&
2104                      raw_spec->pattern[1] == 0xff)) {
2105                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2106                         rte_flow_error_set(error, EINVAL,
2107                                 RTE_FLOW_ERROR_TYPE_ITEM,
2108                                 item, "Not supported by fdir filter");
2109                         return -rte_errno;
2110                 }
2111
2112                 /* check pattern mask */
2113                 if (raw_mask->pattern[0] != 0xff ||
2114                     raw_mask->pattern[1] != 0xff) {
2115                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_ITEM,
2118                                 item, "Not supported by fdir filter");
2119                         return -rte_errno;
2120                 }
2121
2122                 rule->mask.flex_bytes_mask = 0xffff;
2123                 rule->input.flex_bytes =
2124                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2125                         raw_spec->pattern[0];
2126                 rule->flex_bytes_offset = raw_spec->offset;
2127         }
2128
2129         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2130                 /* check if the next not void item is END */
2131                 item = next_no_fuzzy_pattern(pattern, item);
2132                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2133                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2134                         rte_flow_error_set(error, EINVAL,
2135                                 RTE_FLOW_ERROR_TYPE_ITEM,
2136                                 item, "Not supported by fdir filter");
2137                         return -rte_errno;
2138                 }
2139         }
2140
2141         rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2142
2143         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2144 }
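
/*
 * Illustrative, hypothetical sketch (kept out of the build): an application-
 * side IPv4/UDP perfect-match rule of the shape documented above, steering
 * matching packets to queue 3.  It assumes the port is configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT; "port_id", addresses, ports and
 * the queue index are assumptions for the example.
 */
#if 0
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffffff),
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(80),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = RTE_BE16(0xffff),
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH }, /* no spec/mask: don't care */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
#endif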
2145
2146 /**
2147  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2148  * and fill in the flow director filter info along the way.
2149  * VxLAN PATTERN:
2150  * The first not void item must be ETH.
2151  * The second not void item must be IPV4/ IPV6.
2152  * The third not void item must be UDP and the fourth must be VXLAN.
2153  * The next not void item must be END.
2154  * NVGRE PATTERN:
2155  * The first not void item must be ETH.
2156  * The second not void item must be IPV4/ IPV6.
2157  * The third not void item must be NVGRE.
2158  * The next not void item must be END.
2159  * ACTION:
2160  * The first not void action should be QUEUE or DROP.
2161  * The second not void optional action should be MARK,
2162  * mark_id is a uint32_t number.
2163  * The next not void action should be END.
2164  * VxLAN pattern example:
2165  * ITEM         Spec                    Mask
2166  * ETH          NULL                    NULL
2167  * IPV4/IPV6    NULL                    NULL
2168  * UDP          NULL                    NULL
2169  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2170  * MAC VLAN     tci     0x2016          0xEFFF
2171  * END
2172  * NVGRE pattern example:
2173  * ITEM         Spec                    Mask
2174  * ETH          NULL                    NULL
2175  * IPV4/IPV6    NULL                    NULL
2176  * NVGRE        protocol        0x6558  0xFFFF
2177  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2178  * MAC VLAN     tci     0x2016          0xEFFF
2179  * END
2180  * Other members in mask and spec should be set to 0x00.
2181  * item->last should be NULL.
2182  */
2183 static int
2184 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2185                                const struct rte_flow_item pattern[],
2186                                const struct rte_flow_action actions[],
2187                                struct txgbe_fdir_rule *rule,
2188                                struct rte_flow_error *error)
2189 {
2190         const struct rte_flow_item *item;
2191         const struct rte_flow_item_eth *eth_mask;
2192         uint32_t j;
2193
2194         if (!pattern) {
2195                 rte_flow_error_set(error, EINVAL,
2196                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2197                                    NULL, "NULL pattern.");
2198                 return -rte_errno;
2199         }
2200
2201         if (!actions) {
2202                 rte_flow_error_set(error, EINVAL,
2203                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2204                                    NULL, "NULL action.");
2205                 return -rte_errno;
2206         }
2207
2208         if (!attr) {
2209                 rte_flow_error_set(error, EINVAL,
2210                                    RTE_FLOW_ERROR_TYPE_ATTR,
2211                                    NULL, "NULL attribute.");
2212                 return -rte_errno;
2213         }
2214
2215         /**
2216          * Some fields may not be provided. Set spec to 0 and mask to default
2217          * value. So, we need not do anything for the not provided fields later.
2218          */
2219         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2220         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2221         rule->mask.vlan_tci_mask = 0;
2222
2223         /**
2224          * The first not void item should be
2225          * ETH or IPv4 or IPv6 or UDP or VXLAN or NVGRE.
2226          */
2227         item = next_no_void_pattern(pattern, NULL);
2228         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2229             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2230             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2231             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2232             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2233             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2234                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2235                 rte_flow_error_set(error, EINVAL,
2236                         RTE_FLOW_ERROR_TYPE_ITEM,
2237                         item, "Not supported by fdir filter");
2238                 return -rte_errno;
2239         }
2240
2241         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2242
2243         /* Skip MAC. */
2244         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2245                 /* Only used to describe the protocol stack. */
2246                 if (item->spec || item->mask) {
2247                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2248                         rte_flow_error_set(error, EINVAL,
2249                                 RTE_FLOW_ERROR_TYPE_ITEM,
2250                                 item, "Not supported by fdir filter");
2251                         return -rte_errno;
2252                 }
2253                 /* Not supported last point for range*/
2254                 if (item->last) {
2255                         rte_flow_error_set(error, EINVAL,
2256                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2257                                 item, "Not supported last point for range");
2258                         return -rte_errno;
2259                 }
2260
2261                 /* Check if the next not void item is IPv4 or IPv6. */
2262                 item = next_no_void_pattern(pattern, item);
2263                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2264                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2265                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2266                         rte_flow_error_set(error, EINVAL,
2267                                 RTE_FLOW_ERROR_TYPE_ITEM,
2268                                 item, "Not supported by fdir filter");
2269                         return -rte_errno;
2270                 }
2271         }
2272
2273         /* Skip IP. */
2274         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2275             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2276                 /* Only used to describe the protocol stack. */
2277                 if (item->spec || item->mask) {
2278                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2279                         rte_flow_error_set(error, EINVAL,
2280                                 RTE_FLOW_ERROR_TYPE_ITEM,
2281                                 item, "Not supported by fdir filter");
2282                         return -rte_errno;
2283                 }
2284                 /*Not supported last point for range*/
2285                 if (item->last) {
2286                         rte_flow_error_set(error, EINVAL,
2287                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2288                                 item, "Not supported last point for range");
2289                         return -rte_errno;
2290                 }
2291
2292                 /* Check if the next not void item is UDP or NVGRE. */
2293                 item = next_no_void_pattern(pattern, item);
2294                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2295                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2296                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2297                         rte_flow_error_set(error, EINVAL,
2298                                 RTE_FLOW_ERROR_TYPE_ITEM,
2299                                 item, "Not supported by fdir filter");
2300                         return -rte_errno;
2301                 }
2302         }
2303
2304         /* Skip UDP. */
2305         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2306                 /* Only used to describe the protocol stack. */
2307                 if (item->spec || item->mask) {
2308                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2309                         rte_flow_error_set(error, EINVAL,
2310                                 RTE_FLOW_ERROR_TYPE_ITEM,
2311                                 item, "Not supported by fdir filter");
2312                         return -rte_errno;
2313                 }
2314                 /*Not supported last point for range*/
2315                 if (item->last) {
2316                         rte_flow_error_set(error, EINVAL,
2317                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2318                                 item, "Not supported last point for range");
2319                         return -rte_errno;
2320                 }
2321
2322                 /* Check if the next not void item is VxLAN. */
2323                 item = next_no_void_pattern(pattern, item);
2324                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2325                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2326                         rte_flow_error_set(error, EINVAL,
2327                                 RTE_FLOW_ERROR_TYPE_ITEM,
2328                                 item, "Not supported by fdir filter");
2329                         return -rte_errno;
2330                 }
2331         }
2332
2333         /* check if the next not void item is MAC */
2334         item = next_no_void_pattern(pattern, item);
2335         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2336                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2337                 rte_flow_error_set(error, EINVAL,
2338                         RTE_FLOW_ERROR_TYPE_ITEM,
2339                         item, "Not supported by fdir filter");
2340                 return -rte_errno;
2341         }
2342
2343         /**
2344          * Only support vlan and dst MAC address,
2345          * others should be masked.
2346          */
2347
2348         if (!item->mask) {
2349                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2350                 rte_flow_error_set(error, EINVAL,
2351                         RTE_FLOW_ERROR_TYPE_ITEM,
2352                         item, "Not supported by fdir filter");
2353                 return -rte_errno;
2354         }
2355         /*Not supported last point for range*/
2356         if (item->last) {
2357                 rte_flow_error_set(error, EINVAL,
2358                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2359                         item, "Not supported last point for range");
2360                 return -rte_errno;
2361         }
2362         rule->b_mask = TRUE;
2363         eth_mask = item->mask;
2364
2365         /* Ether type should be masked. */
2366         if (eth_mask->type) {
2367                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2368                 rte_flow_error_set(error, EINVAL,
2369                         RTE_FLOW_ERROR_TYPE_ITEM,
2370                         item, "Not supported by fdir filter");
2371                 return -rte_errno;
2372         }
2373
2374         /* src MAC address should be masked. */
2375         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2376                 if (eth_mask->src.addr_bytes[j]) {
2377                         memset(rule, 0,
2378                                sizeof(struct txgbe_fdir_rule));
2379                         rte_flow_error_set(error, EINVAL,
2380                                 RTE_FLOW_ERROR_TYPE_ITEM,
2381                                 item, "Not supported by fdir filter");
2382                         return -rte_errno;
2383                 }
2384         }
2385         rule->mask.mac_addr_byte_mask = 0;
2386         for (j = 0; j < ETH_ADDR_LEN; j++) {
2387                 /* It's a per byte mask. */
2388                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2389                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2390                 } else if (eth_mask->dst.addr_bytes[j]) {
2391                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2392                         rte_flow_error_set(error, EINVAL,
2393                                 RTE_FLOW_ERROR_TYPE_ITEM,
2394                                 item, "Not supported by fdir filter");
2395                         return -rte_errno;
2396                 }
2397         }
2398
2399         /* When no vlan, considered as full mask. */
2400         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2401
2402         /**
2403          * Check if the next not void item is vlan or ipv4.
2404          * IPv6 is not supported.
2405          */
2406         item = next_no_void_pattern(pattern, item);
2407         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2408                 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2409                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2410                 rte_flow_error_set(error, EINVAL,
2411                         RTE_FLOW_ERROR_TYPE_ITEM,
2412                         item, "Not supported by fdir filter");
2413                 return -rte_errno;
2414         }
2415         /*Not supported last point for range*/
2416         if (item->last) {
2417                 rte_flow_error_set(error, EINVAL,
2418                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2419                         item, "Not supported last point for range");
2420                 return -rte_errno;
2421         }
2422
2423         /**
2424          * If the VLAN tci is 0, it means we don't care about the VLAN.
2425          * Do nothing.
2426          */
2427
2428         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2429 }
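
/*
 * Illustrative, hypothetical sketch (kept out of the build): a VxLAN tunnel
 * rule of the shape the tunnel parser above accepts, matching on the inner
 * dst MAC and steering packets to queue 1.  It assumes the port is
 * configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL; "port_id",
 * the MAC address and the queue index are assumptions for the example.
 */
#if 0
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Inner Ethernet: only the dst MAC is matched, full per-byte mask. */
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer, protocol only */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* outer, protocol only */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },  /* inner VLAN, don't care */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
#endif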
2430
2431 static int
2432 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2433                         const struct rte_flow_attr *attr,
2434                         const struct rte_flow_item pattern[],
2435                         const struct rte_flow_action actions[],
2436                         struct txgbe_fdir_rule *rule,
2437                         struct rte_flow_error *error)
2438 {
2439         int ret;
2440         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2441         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2442
2443         ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2444                                         actions, rule, error);
2445         if (!ret)
2446                 goto step_next;
2447
2448         ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2449                                         actions, rule, error);
2450         if (ret)
2451                 return ret;
2452
2453 step_next:
2454
2455         if (hw->mac.type == txgbe_mac_raptor &&
2456                 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2457                 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2458                 return -ENOTSUP;
2459
2460         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2461             fdir_mode != rule->mode)
2462                 return -ENOTSUP;
2463
2464         if (rule->queue >= dev->data->nb_rx_queues)
2465                 return -ENOTSUP;
2466
2467         return ret;
2468 }
2469
2470 static int
2471 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2472                         const struct rte_flow_attr *attr,
2473                         const struct rte_flow_action actions[],
2474                         struct txgbe_rte_flow_rss_conf *rss_conf,
2475                         struct rte_flow_error *error)
2476 {
2477         const struct rte_flow_action *act;
2478         const struct rte_flow_action_rss *rss;
2479         uint16_t n;
2480
2481         /**
2482          * RSS only supports forwarding;
2483          * check if the first not void action is RSS.
2484          */
2485         act = next_no_void_action(actions, NULL);
2486         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2487                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2488                 rte_flow_error_set(error, EINVAL,
2489                         RTE_FLOW_ERROR_TYPE_ACTION,
2490                         act, "Not supported action.");
2491                 return -rte_errno;
2492         }
2493
2494         rss = (const struct rte_flow_action_rss *)act->conf;
2495
2496         if (!rss || !rss->queue_num) {
2497                 rte_flow_error_set(error, EINVAL,
2498                                 RTE_FLOW_ERROR_TYPE_ACTION,
2499                                 act,
2500                            "no valid queues");
2501                 return -rte_errno;
2502         }
2503
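             /* every queue referenced by the RSS action must exist on this port */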
2504         for (n = 0; n < rss->queue_num; n++) {
2505                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2506                         rte_flow_error_set(error, EINVAL,
2507                                    RTE_FLOW_ERROR_TYPE_ACTION,
2508                                    act,
2509                                    "queue id > max number of queues");
2510                         return -rte_errno;
2511                 }
2512         }
2513
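             /* only the default hash function, the outermost encapsulation level
              * and a full-size (40-byte) key are accepted
              */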
2514         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2515                 return rte_flow_error_set
2516                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2517                          "non-default RSS hash functions are not supported");
2518         if (rss->level)
2519                 return rte_flow_error_set
2520                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2521                          "a nonzero RSS encapsulation level is not supported");
2522         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2523                 return rte_flow_error_set
2524                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2525                          "RSS hash key must be exactly 40 bytes");
2526         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2527                 return rte_flow_error_set
2528                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2529                          "too many queues for RSS context");
2530         if (txgbe_rss_conf_init(rss_conf, rss))
2531                 return rte_flow_error_set
2532                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2533                          "RSS context initialization failure");
2534
2535         /* check if the next not void action is END */
2536         act = next_no_void_action(actions, act);
2537         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2538                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2539                 rte_flow_error_set(error, EINVAL,
2540                         RTE_FLOW_ERROR_TYPE_ACTION,
2541                         act, "Not supported action.");
2542                 return -rte_errno;
2543         }
2544
2545         /* parse attr */
2546         /* must be input direction */
2547         if (!attr->ingress) {
2548                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2549                 rte_flow_error_set(error, EINVAL,
2550                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2551                                    attr, "Only support ingress.");
2552                 return -rte_errno;
2553         }
2554
2555         /* not supported */
2556         if (attr->egress) {
2557                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2558                 rte_flow_error_set(error, EINVAL,
2559                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2560                                    attr, "Not support egress.");
2561                 return -rte_errno;
2562         }
2563
2564         /* not supported */
2565         if (attr->transfer) {
2566                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2567                 rte_flow_error_set(error, EINVAL,
2568                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2569                                    attr, "No support for transfer.");
2570                 return -rte_errno;
2571         }
2572
2573         if (attr->priority > 0xFFFF) {
2574                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2575                 rte_flow_error_set(error, EINVAL,
2576                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2577                                    attr, "Error priority.");
2578                 return -rte_errno;
2579         }
2580
2581         return 0;
2582 }
2583
2584 /* remove the rss filter */
2585 static void
2586 txgbe_clear_rss_filter(struct rte_eth_dev *dev)
2587 {
2588         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
2589
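             /* a nonzero queue_num means an RSS rule is currently programmed */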
2590         if (filter_info->rss_info.conf.queue_num)
2591                 txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2592 }
2593
2594 void
2595 txgbe_filterlist_init(void)
2596 {
2597         TAILQ_INIT(&filter_ntuple_list);
2598         TAILQ_INIT(&filter_ethertype_list);
2599         TAILQ_INIT(&filter_syn_list);
2600         TAILQ_INIT(&filter_fdir_list);
2601         TAILQ_INIT(&filter_l2_tunnel_list);
2602         TAILQ_INIT(&filter_rss_list);
2603         TAILQ_INIT(&txgbe_flow_list);
2604 }
2605
2606 void
2607 txgbe_filterlist_flush(void)
2608 {
2609         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2610         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2611         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2612         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2613         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2614         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2615         struct txgbe_rss_conf_ele *rss_filter_ptr;
2616
2617         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2618                 TAILQ_REMOVE(&filter_ntuple_list,
2619                                  ntuple_filter_ptr,
2620                                  entries);
2621                 rte_free(ntuple_filter_ptr);
2622         }
2623
2624         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2625                 TAILQ_REMOVE(&filter_ethertype_list,
2626                                  ethertype_filter_ptr,
2627                                  entries);
2628                 rte_free(ethertype_filter_ptr);
2629         }
2630
2631         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2632                 TAILQ_REMOVE(&filter_syn_list,
2633                                  syn_filter_ptr,
2634                                  entries);
2635                 rte_free(syn_filter_ptr);
2636         }
2637
2638         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2639                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2640                                  l2_tn_filter_ptr,
2641                                  entries);
2642                 rte_free(l2_tn_filter_ptr);
2643         }
2644
2645         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2646                 TAILQ_REMOVE(&filter_fdir_list,
2647                                  fdir_rule_ptr,
2648                                  entries);
2649                 rte_free(fdir_rule_ptr);
2650         }
2651
2652         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2653                 TAILQ_REMOVE(&filter_rss_list,
2654                                  rss_filter_ptr,
2655                                  entries);
2656                 rte_free(rss_filter_ptr);
2657         }
2658
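             /* finally release the rte_flow handles and their bookkeeping entries */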
2659         while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
2660                 TAILQ_REMOVE(&txgbe_flow_list,
2661                                  txgbe_flow_mem_ptr,
2662                                  entries);
2663                 rte_free(txgbe_flow_mem_ptr->flow);
2664                 rte_free(txgbe_flow_mem_ptr);
2665         }
2666 }
2667
2668 /**
2669  * Create a flow rule.
2670  * Theoretically one rule can match more than one kind of filter.
2671  * We let it use the first filter type it matches,
2672  * so the parsing order matters.
2673  */
2674 static struct rte_flow *
2675 txgbe_flow_create(struct rte_eth_dev *dev,
2676                   const struct rte_flow_attr *attr,
2677                   const struct rte_flow_item pattern[],
2678                   const struct rte_flow_action actions[],
2679                   struct rte_flow_error *error)
2680 {
2681         int ret;
2682         struct rte_eth_ntuple_filter ntuple_filter;
2683         struct rte_eth_ethertype_filter ethertype_filter;
2684         struct rte_eth_syn_filter syn_filter;
2685         struct txgbe_fdir_rule fdir_rule;
2686         struct txgbe_l2_tunnel_conf l2_tn_filter;
2687         struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2688         struct txgbe_rte_flow_rss_conf rss_conf;
2689         struct rte_flow *flow = NULL;
2690         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2691         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2692         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2693         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2694         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2695         struct txgbe_rss_conf_ele *rss_filter_ptr;
2696         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2697         uint8_t first_mask = FALSE;
2698
2699         flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2700         if (!flow) {
2701                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2702                 return NULL;
2703         }
2704         txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2705                         sizeof(struct txgbe_flow_mem), 0);
2706         if (!txgbe_flow_mem_ptr) {
2707                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2708                 rte_free(flow);
2709                 return NULL;
2710         }
2711         txgbe_flow_mem_ptr->flow = flow;
2712         TAILQ_INSERT_TAIL(&txgbe_flow_list,
2713                                 txgbe_flow_mem_ptr, entries);
2714
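             /* try each filter parser in priority order; the first one that
              * accepts the pattern decides how the rule is programmed
              */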
2715         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2716         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2717                         actions, &ntuple_filter, error);
2718
2719 #ifdef RTE_LIB_SECURITY
2720         /* An ESP flow is not really a flow */
2721         if (ntuple_filter.proto == IPPROTO_ESP)
2722                 return flow;
2723 #endif
2724
2725         if (!ret) {
2726                 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2727                 if (!ret) {
2728                         ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2729                                 sizeof(struct txgbe_ntuple_filter_ele), 0);
2730                         if (!ntuple_filter_ptr) {
2731                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2732                                 goto out;
2733                         }
2734                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2735                                 &ntuple_filter,
2736                                 sizeof(struct rte_eth_ntuple_filter));
2737                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2738                                 ntuple_filter_ptr, entries);
2739                         flow->rule = ntuple_filter_ptr;
2740                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2741                         return flow;
2742                 }
2743                 goto out;
2744         }
2745
2746         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2747         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2748                                 actions, &ethertype_filter, error);
2749         if (!ret) {
2750                 ret = txgbe_add_del_ethertype_filter(dev,
2751                                 &ethertype_filter, TRUE);
2752                 if (!ret) {
2753                         ethertype_filter_ptr =
2754                                 rte_zmalloc("txgbe_ethertype_filter",
2755                                 sizeof(struct txgbe_ethertype_filter_ele), 0);
2756                         if (!ethertype_filter_ptr) {
2757                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2758                                 goto out;
2759                         }
2760                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2761                                 &ethertype_filter,
2762                                 sizeof(struct rte_eth_ethertype_filter));
2763                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2764                                 ethertype_filter_ptr, entries);
2765                         flow->rule = ethertype_filter_ptr;
2766                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2767                         return flow;
2768                 }
2769                 goto out;
2770         }
2771
2772         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2773         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2774                                 actions, &syn_filter, error);
2775         if (!ret) {
2776                 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2777                 if (!ret) {
2778                         syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2779                                 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2780                         if (!syn_filter_ptr) {
2781                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2782                                 goto out;
2783                         }
2784                         rte_memcpy(&syn_filter_ptr->filter_info,
2785                                 &syn_filter,
2786                                 sizeof(struct rte_eth_syn_filter));
2787                         TAILQ_INSERT_TAIL(&filter_syn_list,
2788                                 syn_filter_ptr,
2789                                 entries);
2790                         flow->rule = syn_filter_ptr;
2791                         flow->filter_type = RTE_ETH_FILTER_SYN;
2792                         return flow;
2793                 }
2794                 goto out;
2795         }
2796
2797         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2798         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2799                                 actions, &fdir_rule, error);
2800         if (!ret) {
2801                 /* A mask cannot be deleted. */
2802                 if (fdir_rule.b_mask) {
2803                         if (!fdir_info->mask_added) {
2804                                 /* It's the first time the mask is set. */
2805                                 rte_memcpy(&fdir_info->mask,
2806                                         &fdir_rule.mask,
2807                                         sizeof(struct txgbe_hw_fdir_mask));
2808                                 fdir_info->flex_bytes_offset =
2809                                         fdir_rule.flex_bytes_offset;
2810
2811                                 if (fdir_rule.mask.flex_bytes_mask)
2812                                         txgbe_fdir_set_flexbytes_offset(dev,
2813                                                 fdir_rule.flex_bytes_offset);
2814
2815                                 ret = txgbe_fdir_set_input_mask(dev);
2816                                 if (ret)
2817                                         goto out;
2818
2819                                 fdir_info->mask_added = TRUE;
2820                                 first_mask = TRUE;
2821                         } else {
2822                                 /**
2823                                  * Only one global mask is supported;
2824                                  * all rule masks must be identical to it.
2825                                  */
2826                                 ret = memcmp(&fdir_info->mask,
2827                                         &fdir_rule.mask,
2828                                         sizeof(struct txgbe_hw_fdir_mask));
2829                                 if (ret)
2830                                         goto out;
2831
2832                                 if (fdir_info->flex_bytes_offset !=
2833                                                 fdir_rule.flex_bytes_offset)
2834                                         goto out;
2835                         }
2836                 }
2837
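                     /* program the rule into hardware only when a full spec was given */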
2838                 if (fdir_rule.b_spec) {
2839                         ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2840                                         FALSE, FALSE);
2841                         if (!ret) {
2842                                 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2843                                         sizeof(struct txgbe_fdir_rule_ele), 0);
2844                                 if (!fdir_rule_ptr) {
2845                                         PMD_DRV_LOG(ERR,
2846                                                 "failed to allocate memory");
2847                                         goto out;
2848                                 }
2849                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2850                                         &fdir_rule,
2851                                         sizeof(struct txgbe_fdir_rule));
2852                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2853                                         fdir_rule_ptr, entries);
2854                                 flow->rule = fdir_rule_ptr;
2855                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2856
2857                                 return flow;
2858                         }
2859
2860                         if (ret) {
2861                                 /**
2862                                  * Clear the mask_added flag if programming
2863                                  * the rule fails.
2864                                  */
2865                                 if (first_mask)
2866                                         fdir_info->mask_added = FALSE;
2867                                 goto out;
2868                         }
2869                 }
2870
2871                 goto out;
2872         }
2873
2874         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2875         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2876                                         actions, &l2_tn_filter, error);
2877         if (!ret) {
2878                 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2879                 if (!ret) {
2880                         l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2881                                 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2882                         if (!l2_tn_filter_ptr) {
2883                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2884                                 goto out;
2885                         }
2886                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2887                                 &l2_tn_filter,
2888                                 sizeof(struct txgbe_l2_tunnel_conf));
2889                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2890                                 l2_tn_filter_ptr, entries);
2891                         flow->rule = l2_tn_filter_ptr;
2892                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2893                         return flow;
2894                 }
2895         }
2896
2897         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2898         ret = txgbe_parse_rss_filter(dev, attr,
2899                                         actions, &rss_conf, error);
2900         if (!ret) {
2901                 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2902                 if (!ret) {
2903                         rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2904                                 sizeof(struct txgbe_rss_conf_ele), 0);
2905                         if (!rss_filter_ptr) {
2906                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2907                                 goto out;
2908                         }
2909                         txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2910                                             &rss_conf.conf);
2911                         TAILQ_INSERT_TAIL(&filter_rss_list,
2912                                 rss_filter_ptr, entries);
2913                         flow->rule = rss_filter_ptr;
2914                         flow->filter_type = RTE_ETH_FILTER_HASH;
2915                         return flow;
2916                 }
2917         }
2918
2919 out:
2920         TAILQ_REMOVE(&txgbe_flow_list,
2921                 txgbe_flow_mem_ptr, entries);
2922         rte_flow_error_set(error, -ret,
2923                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2924                            "Failed to create flow.");
2925         rte_free(txgbe_flow_mem_ptr);
2926         rte_free(flow);
2927         return NULL;
2928 }
2929
2930 /**
2931  * Check if the flow rule is supported by txgbe.
2932  * It only checks the format; it does not guarantee that the rule can be
2933  * programmed into the HW, because there may not be enough room for it.
2934  */
2935 static int
2936 txgbe_flow_validate(struct rte_eth_dev *dev,
2937                 const struct rte_flow_attr *attr,
2938                 const struct rte_flow_item pattern[],
2939                 const struct rte_flow_action actions[],
2940                 struct rte_flow_error *error)
2941 {
2942         struct rte_eth_ntuple_filter ntuple_filter;
2943         struct rte_eth_ethertype_filter ethertype_filter;
2944         struct rte_eth_syn_filter syn_filter;
2945         struct txgbe_l2_tunnel_conf l2_tn_filter;
2946         struct txgbe_fdir_rule fdir_rule;
2947         struct txgbe_rte_flow_rss_conf rss_conf;
2948         int ret = 0;
2949
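             /* run the same parsers as flow create, in the same order,
              * without touching the hardware
              */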
2950         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2951         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2952                                 actions, &ntuple_filter, error);
2953         if (!ret)
2954                 return 0;
2955
2956         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2957         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2958                                 actions, &ethertype_filter, error);
2959         if (!ret)
2960                 return 0;
2961
2962         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2963         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2964                                 actions, &syn_filter, error);
2965         if (!ret)
2966                 return 0;
2967
2968         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2969         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2970                                 actions, &fdir_rule, error);
2971         if (!ret)
2972                 return 0;
2973
2974         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2975         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2976                                 actions, &l2_tn_filter, error);
2977         if (!ret)
2978                 return 0;
2979
2980         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2981         ret = txgbe_parse_rss_filter(dev, attr,
2982                                         actions, &rss_conf, error);
2983
2984         return ret;
2985 }
2986
2987 /* Destroy a flow rule on txgbe. */
2988 static int
2989 txgbe_flow_destroy(struct rte_eth_dev *dev,
2990                 struct rte_flow *flow,
2991                 struct rte_flow_error *error)
2992 {
2993         int ret = 0;
2994         struct rte_flow *pmd_flow = flow;
2995         enum rte_filter_type filter_type = pmd_flow->filter_type;
2996         struct rte_eth_ntuple_filter ntuple_filter;
2997         struct rte_eth_ethertype_filter ethertype_filter;
2998         struct rte_eth_syn_filter syn_filter;
2999         struct txgbe_fdir_rule fdir_rule;
3000         struct txgbe_l2_tunnel_conf l2_tn_filter;
3001         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
3002         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
3003         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
3004         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3005         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
3006         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
3007         struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
3008         struct txgbe_rss_conf_ele *rss_filter_ptr;
3009
3010         switch (filter_type) {
3011         case RTE_ETH_FILTER_NTUPLE:
3012                 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
3013                                         pmd_flow->rule;
3014                 rte_memcpy(&ntuple_filter,
3015                         &ntuple_filter_ptr->filter_info,
3016                         sizeof(struct rte_eth_ntuple_filter));
3017                 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3018                 if (!ret) {
3019                         TAILQ_REMOVE(&filter_ntuple_list,
3020                         ntuple_filter_ptr, entries);
3021                         rte_free(ntuple_filter_ptr);
3022                 }
3023                 break;
3024         case RTE_ETH_FILTER_ETHERTYPE:
3025                 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
3026                                         pmd_flow->rule;
3027                 rte_memcpy(&ethertype_filter,
3028                         &ethertype_filter_ptr->filter_info,
3029                         sizeof(struct rte_eth_ethertype_filter));
3030                 ret = txgbe_add_del_ethertype_filter(dev,
3031                                 &ethertype_filter, FALSE);
3032                 if (!ret) {
3033                         TAILQ_REMOVE(&filter_ethertype_list,
3034                                 ethertype_filter_ptr, entries);
3035                         rte_free(ethertype_filter_ptr);
3036                 }
3037                 break;
3038         case RTE_ETH_FILTER_SYN:
3039                 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
3040                                 pmd_flow->rule;
3041                 rte_memcpy(&syn_filter,
3042                         &syn_filter_ptr->filter_info,
3043                         sizeof(struct rte_eth_syn_filter));
3044                 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
3045                 if (!ret) {
3046                         TAILQ_REMOVE(&filter_syn_list,
3047                                 syn_filter_ptr, entries);
3048                         rte_free(syn_filter_ptr);
3049                 }
3050                 break;
3051         case RTE_ETH_FILTER_FDIR:
3052                 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
3053                 rte_memcpy(&fdir_rule,
3054                         &fdir_rule_ptr->filter_info,
3055                         sizeof(struct txgbe_fdir_rule));
3056                 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3057                 if (!ret) {
3058                         TAILQ_REMOVE(&filter_fdir_list,
3059                                 fdir_rule_ptr, entries);
3060                         rte_free(fdir_rule_ptr);
3061                         if (TAILQ_EMPTY(&filter_fdir_list))
3062                                 fdir_info->mask_added = false;
3063                 }
3064                 break;
3065         case RTE_ETH_FILTER_L2_TUNNEL:
3066                 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
3067                                 pmd_flow->rule;
3068                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3069                         sizeof(struct txgbe_l2_tunnel_conf));
3070                 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3071                 if (!ret) {
3072                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3073                                 l2_tn_filter_ptr, entries);
3074                         rte_free(l2_tn_filter_ptr);
3075                 }
3076                 break;
3077         case RTE_ETH_FILTER_HASH:
3078                 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
3079                                 pmd_flow->rule;
3080                 ret = txgbe_config_rss_filter(dev,
3081                                         &rss_filter_ptr->filter_info, FALSE);
3082                 if (!ret) {
3083                         TAILQ_REMOVE(&filter_rss_list,
3084                                 rss_filter_ptr, entries);
3085                         rte_free(rss_filter_ptr);
3086                 }
3087                 break;
3088         default:
3089                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3090                             filter_type);
3091                 ret = -EINVAL;
3092                 break;
3093         }
3094
3095         if (ret) {
3096                 rte_flow_error_set(error, EINVAL,
3097                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3098                                 NULL, "Failed to destroy flow");
3099                 return ret;
3100         }
3101
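             /* drop the bookkeeping entry linking this rte_flow to the global list */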
3102         TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
3103                 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
3104                         TAILQ_REMOVE(&txgbe_flow_list,
3105                                 txgbe_flow_mem_ptr, entries);
3106                         rte_free(txgbe_flow_mem_ptr);
3107                 }
3108         }
3109         rte_free(flow);
3110
3111         return ret;
3112 }
3113
3114 /* Destroy all flow rules associated with a port on txgbe. */
3115 static int
3116 txgbe_flow_flush(struct rte_eth_dev *dev,
3117                 struct rte_flow_error *error)
3118 {
3119         int ret = 0;
3120
3121         txgbe_clear_all_ntuple_filter(dev);
3122         txgbe_clear_all_ethertype_filter(dev);
3123         txgbe_clear_syn_filter(dev);
3124
3125         ret = txgbe_clear_all_fdir_filter(dev);
3126         if (ret < 0) {
3127                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3128                                         NULL, "Failed to flush rule");
3129                 return ret;
3130         }
3131
3132         ret = txgbe_clear_all_l2_tn_filter(dev);
3133         if (ret < 0) {
3134                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3135                                         NULL, "Failed to flush rule");
3136                 return ret;
3137         }
3138
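             /* hardware filters are cleared; now clear the RSS rule and the
              * software filter lists
              */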
3139         txgbe_clear_rss_filter(dev);
3140
3141         txgbe_filterlist_flush();
3142
3143         return 0;
3144 }
3145
3146 const struct rte_flow_ops txgbe_flow_ops = {
3147         .validate = txgbe_flow_validate,
3148         .create = txgbe_flow_create,
3149         .destroy = txgbe_flow_destroy,
3150         .flush = txgbe_flow_flush,
3151 };
3152
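/*
 * Illustrative sketch only, not part of the driver: an application reaches
 * the callbacks above through the generic rte_flow API. The port_id, attr,
 * pattern and actions variables below are assumed to be set up by the caller.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f = NULL;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     ...
 *     if (f)
 *             rte_flow_destroy(port_id, f, &err);
 *     rte_flow_flush(port_id, &err);
 */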