net/txgbe: support destroying consistent filter
[dpdk.git] drivers/net/txgbe/txgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <sys/queue.h>
6 #include <rte_bus_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10
11 #include "txgbe_ethdev.h"
12
13 #define TXGBE_MIN_N_TUPLE_PRIO 1
14 #define TXGBE_MAX_N_TUPLE_PRIO 7
15 #define TXGBE_MAX_FLX_SOURCE_OFF 62
16
17 /* ntuple filter list structure */
18 struct txgbe_ntuple_filter_ele {
19         TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
20         struct rte_eth_ntuple_filter filter_info;
21 };
22 /* ethertype filter list structure */
23 struct txgbe_ethertype_filter_ele {
24         TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
25         struct rte_eth_ethertype_filter filter_info;
26 };
27 /* syn filter list structure */
28 struct txgbe_eth_syn_filter_ele {
29         TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
30         struct rte_eth_syn_filter filter_info;
31 };
32 /* fdir filter list structure */
33 struct txgbe_fdir_rule_ele {
34         TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
35         struct txgbe_fdir_rule filter_info;
36 };
37 /* l2_tunnel filter list structure */
38 struct txgbe_eth_l2_tunnel_conf_ele {
39         TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
40         struct txgbe_l2_tunnel_conf filter_info;
41 };
42 /* rss filter list structure */
43 struct txgbe_rss_conf_ele {
44         TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
45         struct txgbe_rte_flow_rss_conf filter_info;
46 };
47 /* txgbe_flow memory list structure */
48 struct txgbe_flow_mem {
49         TAILQ_ENTRY(txgbe_flow_mem) entries;
50         struct rte_flow *flow;
51 };
52
53 TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
54 TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
55 TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
56 TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
57 TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
58 TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
59 TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
60
61 static struct txgbe_ntuple_filter_list filter_ntuple_list;
62 static struct txgbe_ethertype_filter_list filter_ethertype_list;
63 static struct txgbe_syn_filter_list filter_syn_list;
64 static struct txgbe_fdir_rule_filter_list filter_fdir_list;
65 static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
66 static struct txgbe_rss_filter_list filter_rss_list;
67 static struct txgbe_flow_mem_list txgbe_flow_list;
68
69 /**
70  * An endless loop can never happen given the assumptions below:
71  * 1. there is at least one non-void item (END),
72  * 2. cur is before END.
73  */
74 static inline
75 const struct rte_flow_item *next_no_void_pattern(
76                 const struct rte_flow_item pattern[],
77                 const struct rte_flow_item *cur)
78 {
79         const struct rte_flow_item *next =
80                 cur ? cur + 1 : &pattern[0];
81         while (1) {
82                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
83                         return next;
84                 next++;
85         }
86 }
87
88 static inline
89 const struct rte_flow_action *next_no_void_action(
90                 const struct rte_flow_action actions[],
91                 const struct rte_flow_action *cur)
92 {
93         const struct rte_flow_action *next =
94                 cur ? cur + 1 : &actions[0];
95         while (1) {
96                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
97                         return next;
98                 next++;
99         }
100 }
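/*
 * Editorial illustration, not part of the upstream file: how the two
 * helpers above walk a list while skipping VOID entries. With the array
 * below, next_no_void_pattern(pattern, NULL) returns &pattern[0] (IPV4),
 * and next_no_void_pattern(pattern, &pattern[0]) skips the VOID entry and
 * returns &pattern[2] (END). The TXGBE_FLOW_DOC_EXAMPLES guard and the
 * doc_* name are hypothetical, used only to keep the sketch out of the build.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_item doc_pattern_with_void[] = {
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_VOID },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */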
101
102 /**
103  * Please be aware of the assumption shared by all the parsers:
104  * rte_flow_item uses big endian, while rte_flow_attr and
105  * rte_flow_action use CPU order.
106  * Because the pattern is used to describe packets, the packets
107  * normally use network byte order.
108  */
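/*
 * Editorial sketch of the byte-order convention above, not part of the
 * upstream file (hypothetical TXGBE_FLOW_DOC_EXAMPLES guard, doc_* names
 * are illustration only): values inside item specs/masks are big endian,
 * while attr and action fields stay in CPU order.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_udp doc_udp_spec = {
        .hdr.dst_port = RTE_BE16(80),   /* network order inside the item */
};
static const struct rte_flow_action_queue doc_order_queue = {
        .index = 3,                     /* CPU order inside the action */
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */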
109
110 /**
111  * Parse the rule to see if it is an n-tuple rule,
112  * and get the n-tuple filter info along the way.
113  * pattern:
114  * The first not void item can be ETH or IPV4.
115  * The second not void item must be IPV4 if the first one is ETH.
116  * The third not void item must be UDP or TCP.
117  * The next not void item must be END.
118  * action:
119  * The first not void action should be QUEUE.
120  * The next not void action should be END.
121  * pattern example:
122  * ITEM         Spec                    Mask
123  * ETH          NULL                    NULL
124  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
125  *              dst_addr 192.167.3.50   0xFFFFFFFF
126  *              next_proto_id   17      0xFF
127  * UDP/TCP/     src_port        80      0xFFFF
128  * SCTP         dst_port        80      0xFFFF
129  * END
130  * other members in mask and spec should be set to 0x00.
131  * item->last should be NULL.
132  */
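/*
 * Editorial example, not part of the upstream file: an rte_flow rule an
 * application could pass that follows the layout documented above
 * (ETH with empty spec/mask, IPv4 addresses/protocol fully masked, UDP
 * ports fully masked, QUEUE action). Addresses, ports and the queue index
 * are arbitrary illustration values; the TXGBE_FLOW_DOC_EXAMPLES guard and
 * the doc_* names are hypothetical.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_ipv4 doc_ntuple_ipv4_spec = {
        .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
        .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
        .hdr.next_proto_id = IPPROTO_UDP,
};
static const struct rte_flow_item_ipv4 doc_ntuple_ipv4_mask = {
        .hdr.src_addr = RTE_BE32(UINT32_MAX),
        .hdr.dst_addr = RTE_BE32(UINT32_MAX),
        .hdr.next_proto_id = UINT8_MAX,
};
static const struct rte_flow_item_udp doc_ntuple_udp_spec = {
        .hdr.src_port = RTE_BE16(80),
        .hdr.dst_port = RTE_BE16(80),
};
static const struct rte_flow_item_udp doc_ntuple_udp_mask = {
        .hdr.src_port = RTE_BE16(UINT16_MAX),
        .hdr.dst_port = RTE_BE16(UINT16_MAX),
};
static const struct rte_flow_item doc_ntuple_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* spec/mask stay NULL */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &doc_ntuple_ipv4_spec, .mask = &doc_ntuple_ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &doc_ntuple_udp_spec, .mask = &doc_ntuple_udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue doc_ntuple_queue = { .index = 1 };
static const struct rte_flow_action doc_ntuple_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &doc_ntuple_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_attr doc_ntuple_attr = {
        .ingress = 1,
        .priority = 1,  /* within TXGBE_MIN/MAX_N_TUPLE_PRIO */
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */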
133 static int
134 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
135                          const struct rte_flow_item pattern[],
136                          const struct rte_flow_action actions[],
137                          struct rte_eth_ntuple_filter *filter,
138                          struct rte_flow_error *error)
139 {
140         const struct rte_flow_item *item;
141         const struct rte_flow_action *act;
142         const struct rte_flow_item_ipv4 *ipv4_spec;
143         const struct rte_flow_item_ipv4 *ipv4_mask;
144         const struct rte_flow_item_tcp *tcp_spec;
145         const struct rte_flow_item_tcp *tcp_mask;
146         const struct rte_flow_item_udp *udp_spec;
147         const struct rte_flow_item_udp *udp_mask;
148         const struct rte_flow_item_sctp *sctp_spec;
149         const struct rte_flow_item_sctp *sctp_mask;
150         const struct rte_flow_item_eth *eth_spec;
151         const struct rte_flow_item_eth *eth_mask;
152         const struct rte_flow_item_vlan *vlan_spec;
153         const struct rte_flow_item_vlan *vlan_mask;
154         struct rte_flow_item_eth eth_null;
155         struct rte_flow_item_vlan vlan_null;
156
157         if (!pattern) {
158                 rte_flow_error_set(error,
159                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
160                         NULL, "NULL pattern.");
161                 return -rte_errno;
162         }
163
164         if (!actions) {
165                 rte_flow_error_set(error, EINVAL,
166                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
167                                    NULL, "NULL action.");
168                 return -rte_errno;
169         }
170         if (!attr) {
171                 rte_flow_error_set(error, EINVAL,
172                                    RTE_FLOW_ERROR_TYPE_ATTR,
173                                    NULL, "NULL attribute.");
174                 return -rte_errno;
175         }
176
177         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
178         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
179
180         /* the first not void item can be MAC or IPv4 */
181         item = next_no_void_pattern(pattern, NULL);
182
183         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
184             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
185                 rte_flow_error_set(error, EINVAL,
186                         RTE_FLOW_ERROR_TYPE_ITEM,
187                         item, "Not supported by ntuple filter");
188                 return -rte_errno;
189         }
190         /* Skip Ethernet */
191         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
192                 eth_spec = item->spec;
193                 eth_mask = item->mask;
194                 /*Not supported last point for range*/
195                 if (item->last) {
196                         rte_flow_error_set(error,
197                           EINVAL,
198                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
199                           item, "Not supported last point for range");
200                         return -rte_errno;
201                 }
202                 /* if the first item is MAC, the content should be NULL */
203                 if ((item->spec || item->mask) &&
204                         (memcmp(eth_spec, &eth_null,
205                                 sizeof(struct rte_flow_item_eth)) ||
206                          memcmp(eth_mask, &eth_null,
207                                 sizeof(struct rte_flow_item_eth)))) {
208                         rte_flow_error_set(error, EINVAL,
209                                 RTE_FLOW_ERROR_TYPE_ITEM,
210                                 item, "Not supported by ntuple filter");
211                         return -rte_errno;
212                 }
213                 /* check if the next not void item is IPv4 or Vlan */
214                 item = next_no_void_pattern(pattern, item);
215                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
216                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
217                         rte_flow_error_set(error,
218                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
219                                 item, "Not supported by ntuple filter");
220                         return -rte_errno;
221                 }
222         }
223
224         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
225                 vlan_spec = item->spec;
226                 vlan_mask = item->mask;
227                 /*Not supported last point for range*/
228                 if (item->last) {
229                         rte_flow_error_set(error,
230                                 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
231                                 item, "Not supported last point for range");
232                         return -rte_errno;
233                 }
234                 /* the content should be NULL */
235                 if ((item->spec || item->mask) &&
236                         (memcmp(vlan_spec, &vlan_null,
237                                 sizeof(struct rte_flow_item_vlan)) ||
238                          memcmp(vlan_mask, &vlan_null,
239                                 sizeof(struct rte_flow_item_vlan)))) {
240                         rte_flow_error_set(error, EINVAL,
241                                 RTE_FLOW_ERROR_TYPE_ITEM,
242                                 item, "Not supported by ntuple filter");
243                         return -rte_errno;
244                 }
245                 /* check if the next not void item is IPv4 */
246                 item = next_no_void_pattern(pattern, item);
247                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
248                         rte_flow_error_set(error,
249                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
250                           item, "Not supported by ntuple filter");
251                         return -rte_errno;
252                 }
253         }
254
255         if (item->mask) {
256                 /* get the IPv4 info */
257                 if (!item->spec || !item->mask) {
258                         rte_flow_error_set(error, EINVAL,
259                                 RTE_FLOW_ERROR_TYPE_ITEM,
260                                 item, "Invalid ntuple mask");
261                         return -rte_errno;
262                 }
263                 /*Not supported last point for range*/
264                 if (item->last) {
265                         rte_flow_error_set(error, EINVAL,
266                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
267                                 item, "Not supported last point for range");
268                         return -rte_errno;
269                 }
270
271                 ipv4_mask = item->mask;
272                 /**
273                  * Only support src & dst addresses, protocol,
274                  * others should be masked.
275                  */
276                 if (ipv4_mask->hdr.version_ihl ||
277                     ipv4_mask->hdr.type_of_service ||
278                     ipv4_mask->hdr.total_length ||
279                     ipv4_mask->hdr.packet_id ||
280                     ipv4_mask->hdr.fragment_offset ||
281                     ipv4_mask->hdr.time_to_live ||
282                     ipv4_mask->hdr.hdr_checksum) {
283                         rte_flow_error_set(error,
284                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
285                                 item, "Not supported by ntuple filter");
286                         return -rte_errno;
287                 }
288                 if ((ipv4_mask->hdr.src_addr != 0 &&
289                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
290                         (ipv4_mask->hdr.dst_addr != 0 &&
291                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
292                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
293                         ipv4_mask->hdr.next_proto_id != 0)) {
294                         rte_flow_error_set(error,
295                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
296                                 item, "Not supported by ntuple filter");
297                         return -rte_errno;
298                 }
299
300                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
301                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
302                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
303
304                 ipv4_spec = item->spec;
305                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
306                 filter->src_ip = ipv4_spec->hdr.src_addr;
307                 filter->proto  = ipv4_spec->hdr.next_proto_id;
308         }
309
310         /* check if the next not void item is TCP or UDP */
311         item = next_no_void_pattern(pattern, item);
312         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
313             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
314             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
315             item->type != RTE_FLOW_ITEM_TYPE_END) {
316                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
317                 rte_flow_error_set(error, EINVAL,
318                         RTE_FLOW_ERROR_TYPE_ITEM,
319                         item, "Not supported by ntuple filter");
320                 return -rte_errno;
321         }
322
323         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
324                 (!item->spec && !item->mask)) {
325                 goto action;
326         }
327
328         /* get the TCP/UDP/SCTP info */
329         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
330                 (!item->spec || !item->mask)) {
331                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
332                 rte_flow_error_set(error, EINVAL,
333                         RTE_FLOW_ERROR_TYPE_ITEM,
334                         item, "Invalid ntuple mask");
335                 return -rte_errno;
336         }
337
338         /*Not supported last point for range*/
339         if (item->last) {
340                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
341                 rte_flow_error_set(error, EINVAL,
342                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
343                         item, "Not supported last point for range");
344                 return -rte_errno;
345         }
346
347         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
348                 tcp_mask = item->mask;
349
350                 /**
351                  * Only support src & dst ports, tcp flags,
352                  * others should be masked.
353                  */
354                 if (tcp_mask->hdr.sent_seq ||
355                     tcp_mask->hdr.recv_ack ||
356                     tcp_mask->hdr.data_off ||
357                     tcp_mask->hdr.rx_win ||
358                     tcp_mask->hdr.cksum ||
359                     tcp_mask->hdr.tcp_urp) {
360                         memset(filter, 0,
361                                 sizeof(struct rte_eth_ntuple_filter));
362                         rte_flow_error_set(error, EINVAL,
363                                 RTE_FLOW_ERROR_TYPE_ITEM,
364                                 item, "Not supported by ntuple filter");
365                         return -rte_errno;
366                 }
367                 if ((tcp_mask->hdr.src_port != 0 &&
368                         tcp_mask->hdr.src_port != UINT16_MAX) ||
369                         (tcp_mask->hdr.dst_port != 0 &&
370                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
371                         rte_flow_error_set(error,
372                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
373                                 item, "Not supported by ntuple filter");
374                         return -rte_errno;
375                 }
376
377                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
378                 filter->src_port_mask  = tcp_mask->hdr.src_port;
379                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
380                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
381                 } else if (!tcp_mask->hdr.tcp_flags) {
382                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
383                 } else {
384                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
385                         rte_flow_error_set(error, EINVAL,
386                                 RTE_FLOW_ERROR_TYPE_ITEM,
387                                 item, "Not supported by ntuple filter");
388                         return -rte_errno;
389                 }
390
391                 tcp_spec = item->spec;
392                 filter->dst_port  = tcp_spec->hdr.dst_port;
393                 filter->src_port  = tcp_spec->hdr.src_port;
394                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
395         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
396                 udp_mask = item->mask;
397
398                 /**
399                  * Only support src & dst ports,
400                  * others should be masked.
401                  */
402                 if (udp_mask->hdr.dgram_len ||
403                     udp_mask->hdr.dgram_cksum) {
404                         memset(filter, 0,
405                                 sizeof(struct rte_eth_ntuple_filter));
406                         rte_flow_error_set(error, EINVAL,
407                                 RTE_FLOW_ERROR_TYPE_ITEM,
408                                 item, "Not supported by ntuple filter");
409                         return -rte_errno;
410                 }
411                 if ((udp_mask->hdr.src_port != 0 &&
412                         udp_mask->hdr.src_port != UINT16_MAX) ||
413                         (udp_mask->hdr.dst_port != 0 &&
414                         udp_mask->hdr.dst_port != UINT16_MAX)) {
415                         rte_flow_error_set(error,
416                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
417                                 item, "Not supported by ntuple filter");
418                         return -rte_errno;
419                 }
420
421                 filter->dst_port_mask = udp_mask->hdr.dst_port;
422                 filter->src_port_mask = udp_mask->hdr.src_port;
423
424                 udp_spec = item->spec;
425                 filter->dst_port = udp_spec->hdr.dst_port;
426                 filter->src_port = udp_spec->hdr.src_port;
427         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
428                 sctp_mask = item->mask;
429
430                 /**
431                  * Only support src & dst ports,
432                  * others should be masked.
433                  */
434                 if (sctp_mask->hdr.tag ||
435                     sctp_mask->hdr.cksum) {
436                         memset(filter, 0,
437                                 sizeof(struct rte_eth_ntuple_filter));
438                         rte_flow_error_set(error, EINVAL,
439                                 RTE_FLOW_ERROR_TYPE_ITEM,
440                                 item, "Not supported by ntuple filter");
441                         return -rte_errno;
442                 }
443
444                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
445                 filter->src_port_mask = sctp_mask->hdr.src_port;
446
447                 sctp_spec = item->spec;
448                 filter->dst_port = sctp_spec->hdr.dst_port;
449                 filter->src_port = sctp_spec->hdr.src_port;
450         } else {
451                 goto action;
452         }
453
454         /* check if the next not void item is END */
455         item = next_no_void_pattern(pattern, item);
456         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
457                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
458                 rte_flow_error_set(error, EINVAL,
459                         RTE_FLOW_ERROR_TYPE_ITEM,
460                         item, "Not supported by ntuple filter");
461                 return -rte_errno;
462         }
463
464 action:
465
466         /**
467          * n-tuple only supports forwarding,
468          * check if the first not void action is QUEUE.
469          */
470         act = next_no_void_action(actions, NULL);
471         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
472                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
473                 rte_flow_error_set(error, EINVAL,
474                         RTE_FLOW_ERROR_TYPE_ACTION,
475                         item, "Not supported action.");
476                 return -rte_errno;
477         }
478         filter->queue =
479                 ((const struct rte_flow_action_queue *)act->conf)->index;
480
481         /* check if the next not void item is END */
482         act = next_no_void_action(actions, act);
483         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
484                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
485                 rte_flow_error_set(error, EINVAL,
486                         RTE_FLOW_ERROR_TYPE_ACTION,
487                         act, "Not supported action.");
488                 return -rte_errno;
489         }
490
491         /* parse attr */
492         /* must be input direction */
493         if (!attr->ingress) {
494                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
495                 rte_flow_error_set(error, EINVAL,
496                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
497                                    attr, "Only support ingress.");
498                 return -rte_errno;
499         }
500
501         /* not supported */
502         if (attr->egress) {
503                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
504                 rte_flow_error_set(error, EINVAL,
505                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
506                                    attr, "Not support egress.");
507                 return -rte_errno;
508         }
509
510         /* not supported */
511         if (attr->transfer) {
512                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
513                 rte_flow_error_set(error, EINVAL,
514                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
515                                    attr, "No support for transfer.");
516                 return -rte_errno;
517         }
518
519         if (attr->priority > 0xFFFF) {
520                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
521                 rte_flow_error_set(error, EINVAL,
522                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
523                                    attr, "Error priority.");
524                 return -rte_errno;
525         }
526         filter->priority = (uint16_t)attr->priority;
527         if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
528                 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
529                 filter->priority = 1;
530
531         return 0;
532 }
533
534 /* a specific function for txgbe because the flags are specific */
535 static int
536 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
537                           const struct rte_flow_attr *attr,
538                           const struct rte_flow_item pattern[],
539                           const struct rte_flow_action actions[],
540                           struct rte_eth_ntuple_filter *filter,
541                           struct rte_flow_error *error)
542 {
543         int ret;
544
545         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
546
547         if (ret)
548                 return ret;
549
550         /* txgbe doesn't support tcp flags */
551         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
552                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
553                 rte_flow_error_set(error, EINVAL,
554                                    RTE_FLOW_ERROR_TYPE_ITEM,
555                                    NULL, "Not supported by ntuple filter");
556                 return -rte_errno;
557         }
558
559         /* txgbe doesn't support many priorities */
560         if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
561             filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
562                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
563                 rte_flow_error_set(error, EINVAL,
564                         RTE_FLOW_ERROR_TYPE_ITEM,
565                         NULL, "Priority not supported by ntuple filter");
566                 return -rte_errno;
567         }
568
569         if (filter->queue >= dev->data->nb_rx_queues) {
570                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
571                 rte_flow_error_set(error, EINVAL,
572                                    RTE_FLOW_ERROR_TYPE_ITEM,
573                                    NULL, "Not supported by ntuple filter");
574                 return -rte_errno;
575         }
576
577         /* fixed value for txgbe */
578         filter->flags = RTE_5TUPLE_FLAGS;
579         return 0;
580 }
581
582 /**
583  * Parse the rule to see if it is an ethertype rule,
584  * and get the ethertype filter info along the way.
585  * pattern:
586  * The first not void item must be ETH.
587  * The next not void item must be END.
588  * action:
589  * The first not void action should be QUEUE.
590  * The next not void action should be END.
591  * pattern example:
592  * ITEM         Spec                    Mask
593  * ETH          type    0x0807          0xFFFF
594  * END
595  * other members in mask and spec should be set to 0x00.
596  * item->last should be NULL.
597  */
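/*
 * Editorial example, not part of the upstream file: an ethertype rule in
 * the shape documented above. EtherType 0x0807 and queue 0 are
 * illustration values; the TXGBE_FLOW_DOC_EXAMPLES guard and doc_* names
 * are hypothetical.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_eth doc_etype_spec = {
        .type = RTE_BE16(0x0807),
};
static const struct rte_flow_item_eth doc_etype_mask = {
        .type = RTE_BE16(UINT16_MAX),   /* MAC address masks stay all-zero */
};
static const struct rte_flow_item doc_etype_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &doc_etype_spec, .mask = &doc_etype_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue doc_etype_queue = { .index = 0 };
static const struct rte_flow_action doc_etype_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &doc_etype_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */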
598 static int
599 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
600                             const struct rte_flow_item *pattern,
601                             const struct rte_flow_action *actions,
602                             struct rte_eth_ethertype_filter *filter,
603                             struct rte_flow_error *error)
604 {
605         const struct rte_flow_item *item;
606         const struct rte_flow_action *act;
607         const struct rte_flow_item_eth *eth_spec;
608         const struct rte_flow_item_eth *eth_mask;
609         const struct rte_flow_action_queue *act_q;
610
611         if (!pattern) {
612                 rte_flow_error_set(error, EINVAL,
613                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
614                                 NULL, "NULL pattern.");
615                 return -rte_errno;
616         }
617
618         if (!actions) {
619                 rte_flow_error_set(error, EINVAL,
620                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
621                                 NULL, "NULL action.");
622                 return -rte_errno;
623         }
624
625         if (!attr) {
626                 rte_flow_error_set(error, EINVAL,
627                                    RTE_FLOW_ERROR_TYPE_ATTR,
628                                    NULL, "NULL attribute.");
629                 return -rte_errno;
630         }
631
632         item = next_no_void_pattern(pattern, NULL);
633         /* The first non-void item should be MAC. */
634         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
635                 rte_flow_error_set(error, EINVAL,
636                         RTE_FLOW_ERROR_TYPE_ITEM,
637                         item, "Not supported by ethertype filter");
638                 return -rte_errno;
639         }
640
641         /*Not supported last point for range*/
642         if (item->last) {
643                 rte_flow_error_set(error, EINVAL,
644                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
645                         item, "Not supported last point for range");
646                 return -rte_errno;
647         }
648
649         /* Get the MAC info. */
650         if (!item->spec || !item->mask) {
651                 rte_flow_error_set(error, EINVAL,
652                                 RTE_FLOW_ERROR_TYPE_ITEM,
653                                 item, "Not supported by ethertype filter");
654                 return -rte_errno;
655         }
656
657         eth_spec = item->spec;
658         eth_mask = item->mask;
659
660         /* Mask bits of source MAC address must be full of 0.
661          * Mask bits of destination MAC address must be full
662          * of 1 or full of 0.
663          */
664         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
665             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
666              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
667                 rte_flow_error_set(error, EINVAL,
668                                 RTE_FLOW_ERROR_TYPE_ITEM,
669                                 item, "Invalid ether address mask");
670                 return -rte_errno;
671         }
672
673         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
674                 rte_flow_error_set(error, EINVAL,
675                                 RTE_FLOW_ERROR_TYPE_ITEM,
676                                 item, "Invalid ethertype mask");
677                 return -rte_errno;
678         }
679
680         /* If mask bits of destination MAC address
681          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
682          */
683         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
684                 filter->mac_addr = eth_spec->dst;
685                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
686         } else {
687                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
688         }
689         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
690
691         /* Check if the next non-void item is END. */
692         item = next_no_void_pattern(pattern, item);
693         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
694                 rte_flow_error_set(error, EINVAL,
695                                 RTE_FLOW_ERROR_TYPE_ITEM,
696                                 item, "Not supported by ethertype filter.");
697                 return -rte_errno;
698         }
699
700         /* Parse action */
701
702         act = next_no_void_action(actions, NULL);
703         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
704             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
705                 rte_flow_error_set(error, EINVAL,
706                                 RTE_FLOW_ERROR_TYPE_ACTION,
707                                 act, "Not supported action.");
708                 return -rte_errno;
709         }
710
711         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
712                 act_q = (const struct rte_flow_action_queue *)act->conf;
713                 filter->queue = act_q->index;
714         } else {
715                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
716         }
717
718         /* Check if the next non-void item is END */
719         act = next_no_void_action(actions, act);
720         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
721                 rte_flow_error_set(error, EINVAL,
722                                 RTE_FLOW_ERROR_TYPE_ACTION,
723                                 act, "Not supported action.");
724                 return -rte_errno;
725         }
726
727         /* Parse attr */
728         /* Must be input direction */
729         if (!attr->ingress) {
730                 rte_flow_error_set(error, EINVAL,
731                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
732                                 attr, "Only support ingress.");
733                 return -rte_errno;
734         }
735
736         /* Not supported */
737         if (attr->egress) {
738                 rte_flow_error_set(error, EINVAL,
739                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
740                                 attr, "Not support egress.");
741                 return -rte_errno;
742         }
743
744         /* Not supported */
745         if (attr->transfer) {
746                 rte_flow_error_set(error, EINVAL,
747                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
748                                 attr, "No support for transfer.");
749                 return -rte_errno;
750         }
751
752         /* Not supported */
753         if (attr->priority) {
754                 rte_flow_error_set(error, EINVAL,
755                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
756                                 attr, "Not support priority.");
757                 return -rte_errno;
758         }
759
760         /* Not supported */
761         if (attr->group) {
762                 rte_flow_error_set(error, EINVAL,
763                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
764                                 attr, "Not support group.");
765                 return -rte_errno;
766         }
767
768         return 0;
769 }
770
771 static int
772 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
773                              const struct rte_flow_attr *attr,
774                              const struct rte_flow_item pattern[],
775                              const struct rte_flow_action actions[],
776                              struct rte_eth_ethertype_filter *filter,
777                              struct rte_flow_error *error)
778 {
779         int ret;
780
781         ret = cons_parse_ethertype_filter(attr, pattern,
782                                         actions, filter, error);
783
784         if (ret)
785                 return ret;
786
787         if (filter->queue >= dev->data->nb_rx_queues) {
788                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
789                 rte_flow_error_set(error, EINVAL,
790                         RTE_FLOW_ERROR_TYPE_ITEM,
791                         NULL, "queue index much too big");
792                 return -rte_errno;
793         }
794
795         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
796                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
797                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
798                 rte_flow_error_set(error, EINVAL,
799                         RTE_FLOW_ERROR_TYPE_ITEM,
800                         NULL, "IPv4/IPv6 not supported by ethertype filter");
801                 return -rte_errno;
802         }
803
804         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
805                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
806                 rte_flow_error_set(error, EINVAL,
807                         RTE_FLOW_ERROR_TYPE_ITEM,
808                         NULL, "mac compare is unsupported");
809                 return -rte_errno;
810         }
811
812         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
813                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
814                 rte_flow_error_set(error, EINVAL,
815                         RTE_FLOW_ERROR_TYPE_ITEM,
816                         NULL, "drop option is unsupported");
817                 return -rte_errno;
818         }
819
820         return 0;
821 }
822
823 /**
824  * Parse the rule to see if it is a TCP SYN rule,
825  * and get the TCP SYN filter info along the way.
826  * pattern:
827  * The first not void item must be ETH.
828  * The second not void item must be IPV4 or IPV6.
829  * The third not void item must be TCP.
830  * The next not void item must be END.
831  * action:
832  * The first not void action should be QUEUE.
833  * The next not void action should be END.
834  * pattern example:
835  * ITEM         Spec                    Mask
836  * ETH          NULL                    NULL
837  * IPV4/IPV6    NULL                    NULL
838  * TCP          tcp_flags       0x02    0x02
839  * END
840  * other members in mask and spec should be set to 0x00.
841  * item->last should be NULL.
842  */
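/*
 * Editorial example, not part of the upstream file: a TCP SYN rule in the
 * shape documented above. Note the parser below requires the tcp_flags
 * mask to be exactly the SYN bit; ETH and IPv4 items are present but carry
 * no spec/mask. Queue 2 is an illustration value; the
 * TXGBE_FLOW_DOC_EXAMPLES guard and doc_* names are hypothetical.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_tcp doc_syn_spec = {
        .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
};
static const struct rte_flow_item_tcp doc_syn_mask = {
        .hdr.tcp_flags = RTE_TCP_SYN_FLAG,      /* every other field zero */
};
static const struct rte_flow_item doc_syn_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_TCP,
          .spec = &doc_syn_spec, .mask = &doc_syn_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue doc_syn_queue = { .index = 2 };
static const struct rte_flow_action doc_syn_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &doc_syn_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */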
843 static int
844 cons_parse_syn_filter(const struct rte_flow_attr *attr,
845                                 const struct rte_flow_item pattern[],
846                                 const struct rte_flow_action actions[],
847                                 struct rte_eth_syn_filter *filter,
848                                 struct rte_flow_error *error)
849 {
850         const struct rte_flow_item *item;
851         const struct rte_flow_action *act;
852         const struct rte_flow_item_tcp *tcp_spec;
853         const struct rte_flow_item_tcp *tcp_mask;
854         const struct rte_flow_action_queue *act_q;
855
856         if (!pattern) {
857                 rte_flow_error_set(error, EINVAL,
858                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
859                                 NULL, "NULL pattern.");
860                 return -rte_errno;
861         }
862
863         if (!actions) {
864                 rte_flow_error_set(error, EINVAL,
865                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
866                                 NULL, "NULL action.");
867                 return -rte_errno;
868         }
869
870         if (!attr) {
871                 rte_flow_error_set(error, EINVAL,
872                                    RTE_FLOW_ERROR_TYPE_ATTR,
873                                    NULL, "NULL attribute.");
874                 return -rte_errno;
875         }
876
877
878         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
879         item = next_no_void_pattern(pattern, NULL);
880         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
881             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
882             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
883             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
884                 rte_flow_error_set(error, EINVAL,
885                                 RTE_FLOW_ERROR_TYPE_ITEM,
886                                 item, "Not supported by syn filter");
887                 return -rte_errno;
888         }
889         /* Not supported last point for range */
890         if (item->last) {
891                 rte_flow_error_set(error, EINVAL,
892                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
893                         item, "Not supported last point for range");
894                 return -rte_errno;
895         }
896
897         /* Skip Ethernet */
898         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
899                 /* if the item is MAC, the content should be NULL */
900                 if (item->spec || item->mask) {
901                         rte_flow_error_set(error, EINVAL,
902                                 RTE_FLOW_ERROR_TYPE_ITEM,
903                                 item, "Invalid SYN address mask");
904                         return -rte_errno;
905                 }
906
907                 /* check if the next not void item is IPv4 or IPv6 */
908                 item = next_no_void_pattern(pattern, item);
909                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
910                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
911                         rte_flow_error_set(error, EINVAL,
912                                 RTE_FLOW_ERROR_TYPE_ITEM,
913                                 item, "Not supported by syn filter");
914                         return -rte_errno;
915                 }
916         }
917
918         /* Skip IP */
919         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
920             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
921                 /* if the item is IP, the content should be NULL */
922                 if (item->spec || item->mask) {
923                         rte_flow_error_set(error, EINVAL,
924                                 RTE_FLOW_ERROR_TYPE_ITEM,
925                                 item, "Invalid SYN mask");
926                         return -rte_errno;
927                 }
928
929                 /* check if the next not void item is TCP */
930                 item = next_no_void_pattern(pattern, item);
931                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
932                         rte_flow_error_set(error, EINVAL,
933                                 RTE_FLOW_ERROR_TYPE_ITEM,
934                                 item, "Not supported by syn filter");
935                         return -rte_errno;
936                 }
937         }
938
939         /* Get the TCP info. Only support SYN. */
940         if (!item->spec || !item->mask) {
941                 rte_flow_error_set(error, EINVAL,
942                                 RTE_FLOW_ERROR_TYPE_ITEM,
943                                 item, "Invalid SYN mask");
944                 return -rte_errno;
945         }
946         /*Not supported last point for range*/
947         if (item->last) {
948                 rte_flow_error_set(error, EINVAL,
949                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
950                         item, "Not supported last point for range");
951                 return -rte_errno;
952         }
953
954         tcp_spec = item->spec;
955         tcp_mask = item->mask;
956         if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
957             tcp_mask->hdr.src_port ||
958             tcp_mask->hdr.dst_port ||
959             tcp_mask->hdr.sent_seq ||
960             tcp_mask->hdr.recv_ack ||
961             tcp_mask->hdr.data_off ||
962             tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
963             tcp_mask->hdr.rx_win ||
964             tcp_mask->hdr.cksum ||
965             tcp_mask->hdr.tcp_urp) {
966                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
967                 rte_flow_error_set(error, EINVAL,
968                                 RTE_FLOW_ERROR_TYPE_ITEM,
969                                 item, "Not supported by syn filter");
970                 return -rte_errno;
971         }
972
973         /* check if the next not void item is END */
974         item = next_no_void_pattern(pattern, item);
975         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
976                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
977                 rte_flow_error_set(error, EINVAL,
978                                 RTE_FLOW_ERROR_TYPE_ITEM,
979                                 item, "Not supported by syn filter");
980                 return -rte_errno;
981         }
982
983         /* check if the first not void action is QUEUE. */
984         act = next_no_void_action(actions, NULL);
985         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
986                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
987                 rte_flow_error_set(error, EINVAL,
988                                 RTE_FLOW_ERROR_TYPE_ACTION,
989                                 act, "Not supported action.");
990                 return -rte_errno;
991         }
992
993         act_q = (const struct rte_flow_action_queue *)act->conf;
994         filter->queue = act_q->index;
995         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
996                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
997                 rte_flow_error_set(error, EINVAL,
998                                 RTE_FLOW_ERROR_TYPE_ACTION,
999                                 act, "Not supported action.");
1000                 return -rte_errno;
1001         }
1002
1003         /* check if the next not void item is END */
1004         act = next_no_void_action(actions, act);
1005         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1006                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1007                 rte_flow_error_set(error, EINVAL,
1008                                 RTE_FLOW_ERROR_TYPE_ACTION,
1009                                 act, "Not supported action.");
1010                 return -rte_errno;
1011         }
1012
1013         /* parse attr */
1014         /* must be input direction */
1015         if (!attr->ingress) {
1016                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1017                 rte_flow_error_set(error, EINVAL,
1018                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1019                         attr, "Only support ingress.");
1020                 return -rte_errno;
1021         }
1022
1023         /* not supported */
1024         if (attr->egress) {
1025                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1026                 rte_flow_error_set(error, EINVAL,
1027                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1028                         attr, "Not support egress.");
1029                 return -rte_errno;
1030         }
1031
1032         /* not supported */
1033         if (attr->transfer) {
1034                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1035                 rte_flow_error_set(error, EINVAL,
1036                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1037                         attr, "No support for transfer.");
1038                 return -rte_errno;
1039         }
1040
1041         /* Support 2 priorities, the lowest or highest. */
1042         if (!attr->priority) {
1043                 filter->hig_pri = 0;
1044         } else if (attr->priority == (uint32_t)~0U) {
1045                 filter->hig_pri = 1;
1046         } else {
1047                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1048                 rte_flow_error_set(error, EINVAL,
1049                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1050                         attr, "Not support priority.");
1051                 return -rte_errno;
1052         }
1053
1054         return 0;
1055 }
1056
1057 static int
1058 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1059                              const struct rte_flow_attr *attr,
1060                              const struct rte_flow_item pattern[],
1061                              const struct rte_flow_action actions[],
1062                              struct rte_eth_syn_filter *filter,
1063                              struct rte_flow_error *error)
1064 {
1065         int ret;
1066
1067         ret = cons_parse_syn_filter(attr, pattern,
1068                                         actions, filter, error);
1069
1070         if (filter->queue >= dev->data->nb_rx_queues)
1071                 return -rte_errno;
1072
1073         if (ret)
1074                 return ret;
1075
1076         return 0;
1077 }
1078
1079 /**
1080  * Parse the rule to see if it is an L2 tunnel rule,
1081  * and get the L2 tunnel filter info along the way.
1082  * Only E-tag is supported now.
1083  * pattern:
1084  * The first not void item must be E_TAG.
1085  * The next not void item must be END.
1086  * action:
1087  * The first not void action should be VF or PF.
1088  * The next not void action should be END.
1089  * pattern example:
1090  * ITEM         Spec                    Mask
1091  * E_TAG        grp             0x1     0x3
1092  *              e_cid_base      0x309   0xFFF
1093  * END
1094  * other members in mask and spec should be set to 0x00.
1095  * item->last should be NULL.
1096  */
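/*
 * Editorial example, not part of the upstream file: an E-tag rule in the
 * shape documented above. GRP (2 bits) and the E-CID base (12 bits) live
 * in the low 14 bits of rsvd_grp_ecid_b, so GRP 0x1 / e_cid_base 0x309
 * encode as 0x1309 with a 0x3FFF mask. VF id 2 is an illustration value;
 * the TXGBE_FLOW_DOC_EXAMPLES guard and doc_* names are hypothetical.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_e_tag doc_etag_spec = {
        .rsvd_grp_ecid_b = RTE_BE16((0x1 << 12) | 0x309),
};
static const struct rte_flow_item_e_tag doc_etag_mask = {
        .rsvd_grp_ecid_b = RTE_BE16(0x3FFF),    /* GRP + E-CID base only */
};
static const struct rte_flow_item doc_etag_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
          .spec = &doc_etag_spec, .mask = &doc_etag_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_vf doc_etag_vf = { .id = 2 };
static const struct rte_flow_action doc_etag_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &doc_etag_vf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */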
1097 static int
1098 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1099                         const struct rte_flow_attr *attr,
1100                         const struct rte_flow_item pattern[],
1101                         const struct rte_flow_action actions[],
1102                         struct txgbe_l2_tunnel_conf *filter,
1103                         struct rte_flow_error *error)
1104 {
1105         const struct rte_flow_item *item;
1106         const struct rte_flow_item_e_tag *e_tag_spec;
1107         const struct rte_flow_item_e_tag *e_tag_mask;
1108         const struct rte_flow_action *act;
1109         const struct rte_flow_action_vf *act_vf;
1110         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1111
1112         if (!pattern) {
1113                 rte_flow_error_set(error, EINVAL,
1114                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1115                         NULL, "NULL pattern.");
1116                 return -rte_errno;
1117         }
1118
1119         if (!actions) {
1120                 rte_flow_error_set(error, EINVAL,
1121                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1122                                    NULL, "NULL action.");
1123                 return -rte_errno;
1124         }
1125
1126         if (!attr) {
1127                 rte_flow_error_set(error, EINVAL,
1128                                    RTE_FLOW_ERROR_TYPE_ATTR,
1129                                    NULL, "NULL attribute.");
1130                 return -rte_errno;
1131         }
1132
1133         /* The first not void item should be e-tag. */
1134         item = next_no_void_pattern(pattern, NULL);
1135         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1136                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1137                 rte_flow_error_set(error, EINVAL,
1138                         RTE_FLOW_ERROR_TYPE_ITEM,
1139                         item, "Not supported by L2 tunnel filter");
1140                 return -rte_errno;
1141         }
1142
1143         if (!item->spec || !item->mask) {
1144                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1145                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1146                         item, "Not supported by L2 tunnel filter");
1147                 return -rte_errno;
1148         }
1149
1150         /*Not supported last point for range*/
1151         if (item->last) {
1152                 rte_flow_error_set(error, EINVAL,
1153                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1154                         item, "Not supported last point for range");
1155                 return -rte_errno;
1156         }
1157
1158         e_tag_spec = item->spec;
1159         e_tag_mask = item->mask;
1160
1161         /* Only care about GRP and E cid base. */
1162         if (e_tag_mask->epcp_edei_in_ecid_b ||
1163             e_tag_mask->in_ecid_e ||
1164             e_tag_mask->ecid_e ||
1165             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1166                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1167                 rte_flow_error_set(error, EINVAL,
1168                         RTE_FLOW_ERROR_TYPE_ITEM,
1169                         item, "Not supported by L2 tunnel filter");
1170                 return -rte_errno;
1171         }
1172
1173         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1174         /**
1175          * grp and e_cid_base are bit fields and only use 14 bits.
1176          * e-tag id is taken as little endian by HW.
1177          */
1178         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1179
1180         /* check if the next not void item is END */
1181         item = next_no_void_pattern(pattern, item);
1182         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1183                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1184                 rte_flow_error_set(error, EINVAL,
1185                         RTE_FLOW_ERROR_TYPE_ITEM,
1186                         item, "Not supported by L2 tunnel filter");
1187                 return -rte_errno;
1188         }
1189
1190         /* parse attr */
1191         /* must be input direction */
1192         if (!attr->ingress) {
1193                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1194                 rte_flow_error_set(error, EINVAL,
1195                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1196                         attr, "Only support ingress.");
1197                 return -rte_errno;
1198         }
1199
1200         /* not supported */
1201         if (attr->egress) {
1202                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1203                 rte_flow_error_set(error, EINVAL,
1204                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1205                         attr, "Not support egress.");
1206                 return -rte_errno;
1207         }
1208
1209         /* not supported */
1210         if (attr->transfer) {
1211                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1212                 rte_flow_error_set(error, EINVAL,
1213                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1214                         attr, "No support for transfer.");
1215                 return -rte_errno;
1216         }
1217
1218         /* not supported */
1219         if (attr->priority) {
1220                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1221                 rte_flow_error_set(error, EINVAL,
1222                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1223                         attr, "Not support priority.");
1224                 return -rte_errno;
1225         }
1226
1227         /* check if the first not void action is VF or PF. */
1228         act = next_no_void_action(actions, NULL);
1229         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1230                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1231                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1232                 rte_flow_error_set(error, EINVAL,
1233                         RTE_FLOW_ERROR_TYPE_ACTION,
1234                         act, "Not supported action.");
1235                 return -rte_errno;
1236         }
1237
1238         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1239                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1240                 filter->pool = act_vf->id;
1241         } else {
1242                 filter->pool = pci_dev->max_vfs;
1243         }
1244
1245         /* check if the next not void item is END */
1246         act = next_no_void_action(actions, act);
1247         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1248                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1249                 rte_flow_error_set(error, EINVAL,
1250                         RTE_FLOW_ERROR_TYPE_ACTION,
1251                         act, "Not supported action.");
1252                 return -rte_errno;
1253         }
1254
1255         return 0;
1256 }
1257
1258 static int
1259 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1260                         const struct rte_flow_attr *attr,
1261                         const struct rte_flow_item pattern[],
1262                         const struct rte_flow_action actions[],
1263                         struct txgbe_l2_tunnel_conf *l2_tn_filter,
1264                         struct rte_flow_error *error)
1265 {
1266         int ret = 0;
1267         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1268         uint16_t vf_num;
1269
1270         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1271                                 actions, l2_tn_filter, error);
1272
1273         vf_num = pci_dev->max_vfs;
1274
1275         if (l2_tn_filter->pool > vf_num)
1276                 return -rte_errno;
1277
1278         return ret;
1279 }
1280
1281 /* Parse to get the attr and action info of flow director rule. */
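/*
 * Editorial example, not part of the upstream file: an action list the
 * parser below accepts, i.e. QUEUE (or DROP), an optional MARK whose id
 * becomes the rule's soft_id, then END. The values, the
 * TXGBE_FLOW_DOC_EXAMPLES guard and the doc_* names are illustration only.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES
static const struct rte_flow_action_queue doc_fdir_queue = { .index = 4 };
static const struct rte_flow_action_mark doc_fdir_mark = { .id = 0x1234 };
static const struct rte_flow_action doc_fdir_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &doc_fdir_queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &doc_fdir_mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */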
1282 static int
1283 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1284                           const struct rte_flow_action actions[],
1285                           struct txgbe_fdir_rule *rule,
1286                           struct rte_flow_error *error)
1287 {
1288         const struct rte_flow_action *act;
1289         const struct rte_flow_action_queue *act_q;
1290         const struct rte_flow_action_mark *mark;
1291
1292         /* parse attr */
1293         /* must be input direction */
1294         if (!attr->ingress) {
1295                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1296                 rte_flow_error_set(error, EINVAL,
1297                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1298                         attr, "Only support ingress.");
1299                 return -rte_errno;
1300         }
1301
1302         /* not supported */
1303         if (attr->egress) {
1304                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1305                 rte_flow_error_set(error, EINVAL,
1306                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1307                         attr, "Not support egress.");
1308                 return -rte_errno;
1309         }
1310
1311         /* not supported */
1312         if (attr->transfer) {
1313                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1314                 rte_flow_error_set(error, EINVAL,
1315                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1316                         attr, "No support for transfer.");
1317                 return -rte_errno;
1318         }
1319
1320         /* not supported */
1321         if (attr->priority) {
1322                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1323                 rte_flow_error_set(error, EINVAL,
1324                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1325                         attr, "Not support priority.");
1326                 return -rte_errno;
1327         }
1328
1329         /* check if the first not void action is QUEUE or DROP. */
1330         act = next_no_void_action(actions, NULL);
1331         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1332             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1333                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1334                 rte_flow_error_set(error, EINVAL,
1335                         RTE_FLOW_ERROR_TYPE_ACTION,
1336                         act, "Not supported action.");
1337                 return -rte_errno;
1338         }
1339
1340         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1341                 act_q = (const struct rte_flow_action_queue *)act->conf;
1342                 rule->queue = act_q->index;
1343         } else { /* drop */
1344                 /* signature mode does not support drop action. */
1345                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1346                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1347                         rte_flow_error_set(error, EINVAL,
1348                                 RTE_FLOW_ERROR_TYPE_ACTION,
1349                                 act, "Not supported action.");
1350                         return -rte_errno;
1351                 }
1352                 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1353         }
1354
1355         /* check if the next not void action is MARK or END */
1356         act = next_no_void_action(actions, act);
1357         if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1358                 act->type != RTE_FLOW_ACTION_TYPE_END) {
1359                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1360                 rte_flow_error_set(error, EINVAL,
1361                         RTE_FLOW_ERROR_TYPE_ACTION,
1362                         act, "Not supported action.");
1363                 return -rte_errno;
1364         }
1365
1366         rule->soft_id = 0;
1367
1368         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1369                 mark = (const struct rte_flow_action_mark *)act->conf;
1370                 rule->soft_id = mark->id;
1371                 act = next_no_void_action(actions, act);
1372         }
1373
1374         /* check if the next not void item is END */
1375         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1376                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1377                 rte_flow_error_set(error, EINVAL,
1378                         RTE_FLOW_ERROR_TYPE_ACTION,
1379                         act, "Not supported action.");
1380                 return -rte_errno;
1381         }
1382
1383         return 0;
1384 }
1385
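/*
 * Illustrative, application-side sketch (not part of the driver) of an
 * action list that txgbe_parse_fdir_act_attr() accepts: QUEUE (or DROP),
 * an optional MARK and a terminating END. The queue index and mark id
 * below are arbitrary example values.
 *
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action_mark mark = { .id = 0x1234 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 *   };
 */
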
1386 /* search next no void pattern and skip fuzzy */
1387 static inline
1388 const struct rte_flow_item *next_no_fuzzy_pattern(
1389                 const struct rte_flow_item pattern[],
1390                 const struct rte_flow_item *cur)
1391 {
1392         const struct rte_flow_item *next =
1393                 next_no_void_pattern(pattern, cur);
1394         while (1) {
1395                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1396                         return next;
1397                 next = next_no_void_pattern(pattern, next);
1398         }
1399 }
1400
1401 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1402 {
1403         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1404         const struct rte_flow_item *item;
1405         uint32_t sh, lh, mh;
1406         int i = 0;
1407
1408         while (1) {
1409                 item = pattern + i;
1410                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1411                         break;
1412
1413                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1414                         spec = item->spec;
1415                         last = item->last;
1416                         mask = item->mask;
1417
1418                         if (!spec || !mask)
1419                                 return 0;
1420
1421                         sh = spec->thresh;
1422
1423                         if (!last)
1424                                 lh = sh;
1425                         else
1426                                 lh = last->thresh;
1427
1428                         mh = mask->thresh;
1429                         sh = sh & mh;
1430                         lh = lh & mh;
1431
1432                         if (!sh || sh > lh)
1433                                 return 0;
1434
1435                         return 1;
1436                 }
1437
1438                 i++;
1439         }
1440
1441         return 0;
1442 }
1443
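/*
 * Illustrative, application-side sketch (not part of the driver): a FUZZY
 * item with a non-zero threshold is how an application asks for signature
 * mode instead of perfect mode; signature_match() above detects it. The
 * threshold value is an arbitrary example.
 *
 *   struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *   struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *   struct rte_flow_item fuzzy_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *           .spec = &fuzzy_spec,
 *           .mask = &fuzzy_mask,
 *   };
 */
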
1444 /**
1445  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1446  * and fill in the flow director filter info along the way.
1447  * UDP/TCP/SCTP PATTERN:
1448  * The first not void item can be ETH or IPV4 or IPV6
1449  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1450  * The next not void item could be UDP or TCP or SCTP (optional)
1451  * The next not void item could be RAW (for flexbyte, optional)
1452  * The next not void item must be END.
1453  * A Fuzzy Match pattern can appear at any place before END.
1454  * Fuzzy Match is optional for IPV4 but is required for IPV6
1455  * MAC VLAN PATTERN:
1456  * The first not void item must be ETH.
1457  * The second not void item must be MAC VLAN.
1458  * The next not void item must be END.
1459  * ACTION:
1460  * The first not void action should be QUEUE or DROP.
1461  * The second not void optional action should be MARK,
1462  * mark_id is a uint32_t number.
1463  * The next not void action should be END.
1464  * UDP/TCP/SCTP pattern example:
1465  * ITEM         Spec                    Mask
1466  * ETH          NULL                    NULL
1467  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1468  *              dst_addr 192.167.3.50   0xFFFFFFFF
1469  * UDP/TCP/SCTP src_port        80      0xFFFF
1470  *              dst_port        80      0xFFFF
1471  * FLEX relative        0       0x1
1472  *              search          0       0x1
1473  *              reserved        0       0
1474  *              offset          12      0xFFFFFFFF
1475  *              limit           0       0xFFFF
1476  *              length          2       0xFFFF
1477  *              pattern[0]      0x86    0xFF
1478  *              pattern[1]      0xDD    0xFF
1479  * END
1480  * MAC VLAN pattern example:
1481  * ITEM         Spec                    Mask
1482  * ETH          dst_addr
1483                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1484                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1485  * MAC VLAN     tci     0x2016          0xEFFF
1486  * END
1487  * Other members in mask and spec should be set to 0x00.
1488  * Item->last should be NULL.
1489  */
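/*
 * Illustrative, application-side sketch (not part of the driver) of the
 * UDP pattern described above, built with the rte_flow API. Addresses,
 * ports and the port id are arbitrary example values; header fields are
 * in network byte order. "actions" and "error" are as in the action-list
 * sketch near txgbe_parse_fdir_act_attr().
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *           .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = RTE_BE32(0xffffffff),
 *           .hdr.dst_addr = RTE_BE32(0xffffffff),
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = RTE_BE16(80),
 *           .hdr.dst_port = RTE_BE16(80),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr.src_port = RTE_BE16(0xffff),
 *           .hdr.dst_port = RTE_BE16(0xffff),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 */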
1490 static int
1491 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1492                                const struct rte_flow_attr *attr,
1493                                const struct rte_flow_item pattern[],
1494                                const struct rte_flow_action actions[],
1495                                struct txgbe_fdir_rule *rule,
1496                                struct rte_flow_error *error)
1497 {
1498         const struct rte_flow_item *item;
1499         const struct rte_flow_item_eth *eth_mask;
1500         const struct rte_flow_item_ipv4 *ipv4_spec;
1501         const struct rte_flow_item_ipv4 *ipv4_mask;
1502         const struct rte_flow_item_ipv6 *ipv6_spec;
1503         const struct rte_flow_item_ipv6 *ipv6_mask;
1504         const struct rte_flow_item_tcp *tcp_spec;
1505         const struct rte_flow_item_tcp *tcp_mask;
1506         const struct rte_flow_item_udp *udp_spec;
1507         const struct rte_flow_item_udp *udp_mask;
1508         const struct rte_flow_item_sctp *sctp_spec;
1509         const struct rte_flow_item_sctp *sctp_mask;
1510         const struct rte_flow_item_raw *raw_mask;
1511         const struct rte_flow_item_raw *raw_spec;
1512         u32 ptype = 0;
1513         uint8_t j;
1514
1515         if (!pattern) {
1516                 rte_flow_error_set(error, EINVAL,
1517                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1518                         NULL, "NULL pattern.");
1519                 return -rte_errno;
1520         }
1521
1522         if (!actions) {
1523                 rte_flow_error_set(error, EINVAL,
1524                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1525                                    NULL, "NULL action.");
1526                 return -rte_errno;
1527         }
1528
1529         if (!attr) {
1530                 rte_flow_error_set(error, EINVAL,
1531                                    RTE_FLOW_ERROR_TYPE_ATTR,
1532                                    NULL, "NULL attribute.");
1533                 return -rte_errno;
1534         }
1535
1536         /**
1537          * Some fields may not be provided. Set spec to 0 and mask to default
1538          * value, so we need not do anything later for the fields not provided.
1539          */
1540         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1541         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1542         rule->mask.vlan_tci_mask = 0;
1543         rule->mask.flex_bytes_mask = 0;
1544
1545         /**
1546          * The first not void item should be
1547          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1548          */
1549         item = next_no_fuzzy_pattern(pattern, NULL);
1550         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1551             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1552             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1553             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1554             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1555             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1556                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1557                 rte_flow_error_set(error, EINVAL,
1558                         RTE_FLOW_ERROR_TYPE_ITEM,
1559                         item, "Not supported by fdir filter");
1560                 return -rte_errno;
1561         }
1562
1563         if (signature_match(pattern))
1564                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1565         else
1566                 rule->mode = RTE_FDIR_MODE_PERFECT;
1567
1568         /*Not supported last point for range*/
1569         if (item->last) {
1570                 rte_flow_error_set(error, EINVAL,
1571                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1572                         item, "Not supported last point for range");
1573                 return -rte_errno;
1574         }
1575
1576         /* Get the MAC info. */
1577         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1578                 /**
1579                  * Only support vlan and dst MAC address,
1580                  * others should be masked.
1581                  */
1582                 if (item->spec && !item->mask) {
1583                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1584                         rte_flow_error_set(error, EINVAL,
1585                                 RTE_FLOW_ERROR_TYPE_ITEM,
1586                                 item, "Not supported by fdir filter");
1587                         return -rte_errno;
1588                 }
1589
1590                 if (item->mask) {
1591                         rule->b_mask = TRUE;
1592                         eth_mask = item->mask;
1593
1594                         /* Ether type should be masked. */
1595                         if (eth_mask->type ||
1596                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1597                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1598                                 rte_flow_error_set(error, EINVAL,
1599                                         RTE_FLOW_ERROR_TYPE_ITEM,
1600                                         item, "Not supported by fdir filter");
1601                                 return -rte_errno;
1602                         }
1603
1604                         /* A non-empty ethernet mask means MAC VLAN mode. */
1605                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1606
1607                         /**
1608                          * The src MAC address mask must be all zero (not matched),
1609                          * and the dst MAC address mask must be all 0xFF.
1610                          */
1611                         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1612                                 if (eth_mask->src.addr_bytes[j] ||
1613                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1614                                         memset(rule, 0,
1615                                         sizeof(struct txgbe_fdir_rule));
1616                                         rte_flow_error_set(error, EINVAL,
1617                                         RTE_FLOW_ERROR_TYPE_ITEM,
1618                                         item, "Not supported by fdir filter");
1619                                         return -rte_errno;
1620                                 }
1621                         }
1622
1623                         /* When no VLAN, considered as full mask. */
1624                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1625                 }
1626                 /** If both spec and mask are NULL,
1627                  * it means we don't care about the ETH item.
1628                  * Do nothing.
1629                  */
1630
1631                 /**
1632                  * Check if the next not void item is vlan or ipv4.
1633                  * IPv6 is not supported.
1634                  */
1635                 item = next_no_fuzzy_pattern(pattern, item);
1636                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1637                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1638                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1639                                 rte_flow_error_set(error, EINVAL,
1640                                         RTE_FLOW_ERROR_TYPE_ITEM,
1641                                         item, "Not supported by fdir filter");
1642                                 return -rte_errno;
1643                         }
1644                 } else {
1645                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1646                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1647                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1648                                 rte_flow_error_set(error, EINVAL,
1649                                         RTE_FLOW_ERROR_TYPE_ITEM,
1650                                         item, "Not supported by fdir filter");
1651                                 return -rte_errno;
1652                         }
1653                 }
1654         }
1655
1656         /* Get the IPV4 info. */
1657         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1658                 /**
1659                  * Set the flow type even if there's no content
1660                  * as we must have a flow type.
1661                  */
1662                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1663                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1664                 /*Not supported last point for range*/
1665                 if (item->last) {
1666                         rte_flow_error_set(error, EINVAL,
1667                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1668                                 item, "Not supported last point for range");
1669                         return -rte_errno;
1670                 }
1671                 /**
1672                  * Only care about src & dst addresses,
1673                  * others should be masked.
1674                  */
1675                 if (!item->mask) {
1676                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1677                         rte_flow_error_set(error, EINVAL,
1678                                 RTE_FLOW_ERROR_TYPE_ITEM,
1679                                 item, "Not supported by fdir filter");
1680                         return -rte_errno;
1681                 }
1682                 rule->b_mask = TRUE;
1683                 ipv4_mask = item->mask;
1684                 if (ipv4_mask->hdr.version_ihl ||
1685                     ipv4_mask->hdr.type_of_service ||
1686                     ipv4_mask->hdr.total_length ||
1687                     ipv4_mask->hdr.packet_id ||
1688                     ipv4_mask->hdr.fragment_offset ||
1689                     ipv4_mask->hdr.time_to_live ||
1690                     ipv4_mask->hdr.next_proto_id ||
1691                     ipv4_mask->hdr.hdr_checksum) {
1692                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1693                         rte_flow_error_set(error, EINVAL,
1694                                 RTE_FLOW_ERROR_TYPE_ITEM,
1695                                 item, "Not supported by fdir filter");
1696                         return -rte_errno;
1697                 }
1698                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1699                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1700
1701                 if (item->spec) {
1702                         rule->b_spec = TRUE;
1703                         ipv4_spec = item->spec;
1704                         rule->input.dst_ip[0] =
1705                                 ipv4_spec->hdr.dst_addr;
1706                         rule->input.src_ip[0] =
1707                                 ipv4_spec->hdr.src_addr;
1708                 }
1709
1710                 /**
1711                  * Check if the next not void item is
1712                  * TCP or UDP or SCTP or END.
1713                  */
1714                 item = next_no_fuzzy_pattern(pattern, item);
1715                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1716                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1717                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1718                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1719                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1720                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1721                         rte_flow_error_set(error, EINVAL,
1722                                 RTE_FLOW_ERROR_TYPE_ITEM,
1723                                 item, "Not supported by fdir filter");
1724                         return -rte_errno;
1725                 }
1726         }
1727
1728         /* Get the IPV6 info. */
1729         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1730                 /**
1731                  * Set the flow type even if there's no content
1732                  * as we must have a flow type.
1733                  */
1734                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1735                 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1736
1737                 /**
1738                  * 1. must be a signature match rule
1739                  * 2. 'last' is not supported
1740                  * 3. mask must not be NULL
1741                  */
1742                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1743                     item->last ||
1744                     !item->mask) {
1745                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1746                         rte_flow_error_set(error, EINVAL,
1747                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1748                                 item, "Not supported last point for range");
1749                         return -rte_errno;
1750                 }
1751
1752                 rule->b_mask = TRUE;
1753                 ipv6_mask = item->mask;
1754                 if (ipv6_mask->hdr.vtc_flow ||
1755                     ipv6_mask->hdr.payload_len ||
1756                     ipv6_mask->hdr.proto ||
1757                     ipv6_mask->hdr.hop_limits) {
1758                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1759                         rte_flow_error_set(error, EINVAL,
1760                                 RTE_FLOW_ERROR_TYPE_ITEM,
1761                                 item, "Not supported by fdir filter");
1762                         return -rte_errno;
1763                 }
1764
1765                 /* check src addr mask */
1766                 for (j = 0; j < 16; j++) {
1767                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1768                                 rule->mask.src_ipv6_mask |= 1 << j;
1769                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1770                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1771                                 rte_flow_error_set(error, EINVAL,
1772                                         RTE_FLOW_ERROR_TYPE_ITEM,
1773                                         item, "Not supported by fdir filter");
1774                                 return -rte_errno;
1775                         }
1776                 }
1777
1778                 /* check dst addr mask */
1779                 for (j = 0; j < 16; j++) {
1780                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1781                                 rule->mask.dst_ipv6_mask |= 1 << j;
1782                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1783                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1784                                 rte_flow_error_set(error, EINVAL,
1785                                         RTE_FLOW_ERROR_TYPE_ITEM,
1786                                         item, "Not supported by fdir filter");
1787                                 return -rte_errno;
1788                         }
1789                 }
1790
1791                 if (item->spec) {
1792                         rule->b_spec = TRUE;
1793                         ipv6_spec = item->spec;
1794                         rte_memcpy(rule->input.src_ip,
1795                                    ipv6_spec->hdr.src_addr, 16);
1796                         rte_memcpy(rule->input.dst_ip,
1797                                    ipv6_spec->hdr.dst_addr, 16);
1798                 }
1799
1800                 /**
1801                  * Check if the next not void item is
1802                  * TCP or UDP or SCTP or END.
1803                  */
1804                 item = next_no_fuzzy_pattern(pattern, item);
1805                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1806                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1807                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1808                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1809                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1810                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1811                         rte_flow_error_set(error, EINVAL,
1812                                 RTE_FLOW_ERROR_TYPE_ITEM,
1813                                 item, "Not supported by fdir filter");
1814                         return -rte_errno;
1815                 }
1816         }
1817
1818         /* Get the TCP info. */
1819         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1820                 /**
1821                  * Set the flow type even if there's no content
1822                  * as we must have a flow type.
1823                  */
1824                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1825                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1826                 /*Not supported last point for range*/
1827                 if (item->last) {
1828                         rte_flow_error_set(error, EINVAL,
1829                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1830                                 item, "Not supported last point for range");
1831                         return -rte_errno;
1832                 }
1833                 /**
1834                  * Only care about src & dst ports,
1835                  * others should be masked.
1836                  */
1837                 if (!item->mask) {
1838                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1839                         rte_flow_error_set(error, EINVAL,
1840                                 RTE_FLOW_ERROR_TYPE_ITEM,
1841                                 item, "Not supported by fdir filter");
1842                         return -rte_errno;
1843                 }
1844                 rule->b_mask = TRUE;
1845                 tcp_mask = item->mask;
1846                 if (tcp_mask->hdr.sent_seq ||
1847                     tcp_mask->hdr.recv_ack ||
1848                     tcp_mask->hdr.data_off ||
1849                     tcp_mask->hdr.tcp_flags ||
1850                     tcp_mask->hdr.rx_win ||
1851                     tcp_mask->hdr.cksum ||
1852                     tcp_mask->hdr.tcp_urp) {
1853                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1854                         rte_flow_error_set(error, EINVAL,
1855                                 RTE_FLOW_ERROR_TYPE_ITEM,
1856                                 item, "Not supported by fdir filter");
1857                         return -rte_errno;
1858                 }
1859                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1860                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1861
1862                 if (item->spec) {
1863                         rule->b_spec = TRUE;
1864                         tcp_spec = item->spec;
1865                         rule->input.src_port =
1866                                 tcp_spec->hdr.src_port;
1867                         rule->input.dst_port =
1868                                 tcp_spec->hdr.dst_port;
1869                 }
1870
1871                 item = next_no_fuzzy_pattern(pattern, item);
1872                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1873                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1874                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1875                         rte_flow_error_set(error, EINVAL,
1876                                 RTE_FLOW_ERROR_TYPE_ITEM,
1877                                 item, "Not supported by fdir filter");
1878                         return -rte_errno;
1879                 }
1880         }
1881
1882         /* Get the UDP info */
1883         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1884                 /**
1885                  * Set the flow type even if there's no content
1886                  * as we must have a flow type.
1887                  */
1888                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1889                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1890                 /*Not supported last point for range*/
1891                 if (item->last) {
1892                         rte_flow_error_set(error, EINVAL,
1893                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1894                                 item, "Not supported last point for range");
1895                         return -rte_errno;
1896                 }
1897                 /**
1898                  * Only care about src & dst ports,
1899                  * others should be masked.
1900                  */
1901                 if (!item->mask) {
1902                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1903                         rte_flow_error_set(error, EINVAL,
1904                                 RTE_FLOW_ERROR_TYPE_ITEM,
1905                                 item, "Not supported by fdir filter");
1906                         return -rte_errno;
1907                 }
1908                 rule->b_mask = TRUE;
1909                 udp_mask = item->mask;
1910                 if (udp_mask->hdr.dgram_len ||
1911                     udp_mask->hdr.dgram_cksum) {
1912                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1913                         rte_flow_error_set(error, EINVAL,
1914                                 RTE_FLOW_ERROR_TYPE_ITEM,
1915                                 item, "Not supported by fdir filter");
1916                         return -rte_errno;
1917                 }
1918                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1919                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1920
1921                 if (item->spec) {
1922                         rule->b_spec = TRUE;
1923                         udp_spec = item->spec;
1924                         rule->input.src_port =
1925                                 udp_spec->hdr.src_port;
1926                         rule->input.dst_port =
1927                                 udp_spec->hdr.dst_port;
1928                 }
1929
1930                 item = next_no_fuzzy_pattern(pattern, item);
1931                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1932                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1933                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1934                         rte_flow_error_set(error, EINVAL,
1935                                 RTE_FLOW_ERROR_TYPE_ITEM,
1936                                 item, "Not supported by fdir filter");
1937                         return -rte_errno;
1938                 }
1939         }
1940
1941         /* Get the SCTP info */
1942         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1943                 /**
1944                  * Set the flow type even if there's no content
1945                  * as we must have a flow type.
1946                  */
1947                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1948                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1949                 /*Not supported last point for range*/
1950                 if (item->last) {
1951                         rte_flow_error_set(error, EINVAL,
1952                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1953                                 item, "Not supported last point for range");
1954                         return -rte_errno;
1955                 }
1956
1957                 /**
1958                  * Only care about src & dst ports,
1959                  * others should be masked.
1960                  */
1961                 if (!item->mask) {
1962                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1963                         rte_flow_error_set(error, EINVAL,
1964                                 RTE_FLOW_ERROR_TYPE_ITEM,
1965                                 item, "Not supported by fdir filter");
1966                         return -rte_errno;
1967                 }
1968                 rule->b_mask = TRUE;
1969                 sctp_mask = item->mask;
1970                 if (sctp_mask->hdr.tag ||
1971                         sctp_mask->hdr.cksum) {
1972                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1973                         rte_flow_error_set(error, EINVAL,
1974                                 RTE_FLOW_ERROR_TYPE_ITEM,
1975                                 item, "Not supported by fdir filter");
1976                         return -rte_errno;
1977                 }
1978                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1979                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1980
1981                 if (item->spec) {
1982                         rule->b_spec = TRUE;
1983                         sctp_spec = item->spec;
1984                         rule->input.src_port =
1985                                 sctp_spec->hdr.src_port;
1986                         rule->input.dst_port =
1987                                 sctp_spec->hdr.dst_port;
1988                 }
1989                 /* other fields, even the SCTP ports, are not supported */
1990                 sctp_mask = item->mask;
1991                 if (sctp_mask &&
1992                         (sctp_mask->hdr.src_port ||
1993                          sctp_mask->hdr.dst_port ||
1994                          sctp_mask->hdr.tag ||
1995                          sctp_mask->hdr.cksum)) {
1996                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1997                         rte_flow_error_set(error, EINVAL,
1998                                 RTE_FLOW_ERROR_TYPE_ITEM,
1999                                 item, "Not supported by fdir filter");
2000                         return -rte_errno;
2001                 }
2002
2003                 item = next_no_fuzzy_pattern(pattern, item);
2004                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2005                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2006                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2007                         rte_flow_error_set(error, EINVAL,
2008                                 RTE_FLOW_ERROR_TYPE_ITEM,
2009                                 item, "Not supported by fdir filter");
2010                         return -rte_errno;
2011                 }
2012         }
2013
2014         /* Get the flex byte info */
2015         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2016                 /* Not supported last point for range*/
2017                 if (item->last) {
2018                         rte_flow_error_set(error, EINVAL,
2019                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2020                                 item, "Not supported last point for range");
2021                         return -rte_errno;
2022                 }
2023                 /* mask should not be null */
2024                 if (!item->mask || !item->spec) {
2025                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2026                         rte_flow_error_set(error, EINVAL,
2027                                 RTE_FLOW_ERROR_TYPE_ITEM,
2028                                 item, "Not supported by fdir filter");
2029                         return -rte_errno;
2030                 }
2031
2032                 raw_mask = item->mask;
2033
2034                 /* check mask */
2035                 if (raw_mask->relative != 0x1 ||
2036                     raw_mask->search != 0x1 ||
2037                     raw_mask->reserved != 0x0 ||
2038                     (uint32_t)raw_mask->offset != 0xffffffff ||
2039                     raw_mask->limit != 0xffff ||
2040                     raw_mask->length != 0xffff) {
2041                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2042                         rte_flow_error_set(error, EINVAL,
2043                                 RTE_FLOW_ERROR_TYPE_ITEM,
2044                                 item, "Not supported by fdir filter");
2045                         return -rte_errno;
2046                 }
2047
2048                 raw_spec = item->spec;
2049
2050                 /* check spec */
2051                 if (raw_spec->relative != 0 ||
2052                     raw_spec->search != 0 ||
2053                     raw_spec->reserved != 0 ||
2054                     raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2055                     raw_spec->offset % 2 ||
2056                     raw_spec->limit != 0 ||
2057                     raw_spec->length != 2 ||
2058                     /* pattern can't be 0xffff */
2059                     (raw_spec->pattern[0] == 0xff &&
2060                      raw_spec->pattern[1] == 0xff)) {
2061                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2062                         rte_flow_error_set(error, EINVAL,
2063                                 RTE_FLOW_ERROR_TYPE_ITEM,
2064                                 item, "Not supported by fdir filter");
2065                         return -rte_errno;
2066                 }
2067
2068                 /* check pattern mask */
2069                 if (raw_mask->pattern[0] != 0xff ||
2070                     raw_mask->pattern[1] != 0xff) {
2071                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2072                         rte_flow_error_set(error, EINVAL,
2073                                 RTE_FLOW_ERROR_TYPE_ITEM,
2074                                 item, "Not supported by fdir filter");
2075                         return -rte_errno;
2076                 }
2077
2078                 rule->mask.flex_bytes_mask = 0xffff;
2079                 rule->input.flex_bytes =
2080                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2081                         raw_spec->pattern[0];
2082                 rule->flex_bytes_offset = raw_spec->offset;
2083         }
2084
2085         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2086                 /* check if the next not void item is END */
2087                 item = next_no_fuzzy_pattern(pattern, item);
2088                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2089                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2090                         rte_flow_error_set(error, EINVAL,
2091                                 RTE_FLOW_ERROR_TYPE_ITEM,
2092                                 item, "Not supported by fdir filter");
2093                         return -rte_errno;
2094                 }
2095         }
2096
2097         rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2098
2099         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2100 }
2101
2102 /**
2103  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2104  * and fill in the flow director filter info along the way.
2105  * VxLAN PATTERN:
2106  * The first not void item must be ETH.
2107  * The second not void item must be IPV4/ IPV6.
2108  * The third not void item must be UDP, followed by VXLAN.
2109  * The next not void item must be END.
2110  * NVGRE PATTERN:
2111  * The first not void item must be ETH.
2112  * The second not void item must be IPV4/ IPV6.
2113  * The third not void item must be NVGRE.
2114  * The next not void item must be END.
2115  * ACTION:
2116  * The first not void action should be QUEUE or DROP.
2117  * The second not void optional action should be MARK,
2118  * mark_id is a uint32_t number.
2119  * The next not void action should be END.
2120  * VxLAN pattern example:
2121  * ITEM         Spec                    Mask
2122  * ETH          NULL                    NULL
2123  * IPV4/IPV6    NULL                    NULL
2124  * UDP          NULL                    NULL
2125  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2126  * MAC VLAN     tci     0x2016          0xEFFF
2127  * END
2128  * NVGRE pattern example:
2129  * ITEM         Spec                    Mask
2130  * ETH          NULL                    NULL
2131  * IPV4/IPV6    NULL                    NULL
2132  * NVGRE        protocol        0x6558  0xFFFF
2133  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2134  * MAC VLAN     tci     0x2016          0xEFFF
2135  * END
2136  * Other members in mask and spec should be set to 0x00.
2137  * item->last should be NULL.
2138  */
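/*
 * Illustrative, application-side sketch (not part of the driver) of the
 * VxLAN pattern described above. The outer ETH/IPv4/UDP items only
 * describe the protocol stack, so they carry no spec or mask; the VNI,
 * inner MAC address and VLAN tci are arbitrary example values.
 *
 *   struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *   struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
 *   struct rte_flow_item_eth inner_eth_spec = {
 *           .dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *   };
 *   struct rte_flow_item_eth inner_eth_mask = {
 *           .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *   };
 *   struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x2016) };
 *   struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xefff) };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec, .mask = &vxlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *             .spec = &vlan_spec, .mask = &vlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */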
2139 static int
2140 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2141                                const struct rte_flow_item pattern[],
2142                                const struct rte_flow_action actions[],
2143                                struct txgbe_fdir_rule *rule,
2144                                struct rte_flow_error *error)
2145 {
2146         const struct rte_flow_item *item;
2147         const struct rte_flow_item_eth *eth_mask;
2148         uint32_t j;
2149
2150         if (!pattern) {
2151                 rte_flow_error_set(error, EINVAL,
2152                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2153                                    NULL, "NULL pattern.");
2154                 return -rte_errno;
2155         }
2156
2157         if (!actions) {
2158                 rte_flow_error_set(error, EINVAL,
2159                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2160                                    NULL, "NULL action.");
2161                 return -rte_errno;
2162         }
2163
2164         if (!attr) {
2165                 rte_flow_error_set(error, EINVAL,
2166                                    RTE_FLOW_ERROR_TYPE_ATTR,
2167                                    NULL, "NULL attribute.");
2168                 return -rte_errno;
2169         }
2170
2171         /**
2172          * Some fields may not be provided. Set spec to 0 and mask to default
2173          * value, so we need not do anything later for the fields not provided.
2174          */
2175         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2176         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2177         rule->mask.vlan_tci_mask = 0;
2178
2179         /**
2180          * The first not void item should be
2181          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2182          */
2183         item = next_no_void_pattern(pattern, NULL);
2184         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2185             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2186             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2187             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2188             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2189             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2190                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2191                 rte_flow_error_set(error, EINVAL,
2192                         RTE_FLOW_ERROR_TYPE_ITEM,
2193                         item, "Not supported by fdir filter");
2194                 return -rte_errno;
2195         }
2196
2197         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2198
2199         /* Skip MAC. */
2200         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2201                 /* Only used to describe the protocol stack. */
2202                 if (item->spec || item->mask) {
2203                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2204                         rte_flow_error_set(error, EINVAL,
2205                                 RTE_FLOW_ERROR_TYPE_ITEM,
2206                                 item, "Not supported by fdir filter");
2207                         return -rte_errno;
2208                 }
2209                 /* Not supported last point for range*/
2210                 if (item->last) {
2211                         rte_flow_error_set(error, EINVAL,
2212                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2213                                 item, "Not supported last point for range");
2214                         return -rte_errno;
2215                 }
2216
2217                 /* Check if the next not void item is IPv4 or IPv6. */
2218                 item = next_no_void_pattern(pattern, item);
2219                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2220                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2221                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2222                         rte_flow_error_set(error, EINVAL,
2223                                 RTE_FLOW_ERROR_TYPE_ITEM,
2224                                 item, "Not supported by fdir filter");
2225                         return -rte_errno;
2226                 }
2227         }
2228
2229         /* Skip IP. */
2230         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2231             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2232                 /* Only used to describe the protocol stack. */
2233                 if (item->spec || item->mask) {
2234                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2235                         rte_flow_error_set(error, EINVAL,
2236                                 RTE_FLOW_ERROR_TYPE_ITEM,
2237                                 item, "Not supported by fdir filter");
2238                         return -rte_errno;
2239                 }
2240                 /*Not supported last point for range*/
2241                 if (item->last) {
2242                         rte_flow_error_set(error, EINVAL,
2243                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2244                                 item, "Not supported last point for range");
2245                         return -rte_errno;
2246                 }
2247
2248                 /* Check if the next not void item is UDP or NVGRE. */
2249                 item = next_no_void_pattern(pattern, item);
2250                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2251                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2252                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2253                         rte_flow_error_set(error, EINVAL,
2254                                 RTE_FLOW_ERROR_TYPE_ITEM,
2255                                 item, "Not supported by fdir filter");
2256                         return -rte_errno;
2257                 }
2258         }
2259
2260         /* Skip UDP. */
2261         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2262                 /* Only used to describe the protocol stack. */
2263                 if (item->spec || item->mask) {
2264                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2265                         rte_flow_error_set(error, EINVAL,
2266                                 RTE_FLOW_ERROR_TYPE_ITEM,
2267                                 item, "Not supported by fdir filter");
2268                         return -rte_errno;
2269                 }
2270                 /*Not supported last point for range*/
2271                 if (item->last) {
2272                         rte_flow_error_set(error, EINVAL,
2273                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2274                                 item, "Not supported last point for range");
2275                         return -rte_errno;
2276                 }
2277
2278                 /* Check if the next not void item is VxLAN. */
2279                 item = next_no_void_pattern(pattern, item);
2280                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2281                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2282                         rte_flow_error_set(error, EINVAL,
2283                                 RTE_FLOW_ERROR_TYPE_ITEM,
2284                                 item, "Not supported by fdir filter");
2285                         return -rte_errno;
2286                 }
2287         }
2288
2289         /* check if the next not void item is MAC */
2290         item = next_no_void_pattern(pattern, item);
2291         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2292                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2293                 rte_flow_error_set(error, EINVAL,
2294                         RTE_FLOW_ERROR_TYPE_ITEM,
2295                         item, "Not supported by fdir filter");
2296                 return -rte_errno;
2297         }
2298
2299         /**
2300          * Only support vlan and dst MAC address,
2301          * others should be masked.
2302          */
2303
2304         if (!item->mask) {
2305                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2306                 rte_flow_error_set(error, EINVAL,
2307                         RTE_FLOW_ERROR_TYPE_ITEM,
2308                         item, "Not supported by fdir filter");
2309                 return -rte_errno;
2310         }
2311         /*Not supported last point for range*/
2312         if (item->last) {
2313                 rte_flow_error_set(error, EINVAL,
2314                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2315                         item, "Not supported last point for range");
2316                 return -rte_errno;
2317         }
2318         rule->b_mask = TRUE;
2319         eth_mask = item->mask;
2320
2321         /* Ether type should be masked. */
2322         if (eth_mask->type) {
2323                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2324                 rte_flow_error_set(error, EINVAL,
2325                         RTE_FLOW_ERROR_TYPE_ITEM,
2326                         item, "Not supported by fdir filter");
2327                 return -rte_errno;
2328         }
2329
2330         /* src MAC address should be masked. */
2331         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2332                 if (eth_mask->src.addr_bytes[j]) {
2333                         memset(rule, 0,
2334                                sizeof(struct txgbe_fdir_rule));
2335                         rte_flow_error_set(error, EINVAL,
2336                                 RTE_FLOW_ERROR_TYPE_ITEM,
2337                                 item, "Not supported by fdir filter");
2338                         return -rte_errno;
2339                 }
2340         }
2341         rule->mask.mac_addr_byte_mask = 0;
2342         for (j = 0; j < ETH_ADDR_LEN; j++) {
2343                 /* It's a per byte mask. */
2344                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2345                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2346                 } else if (eth_mask->dst.addr_bytes[j]) {
2347                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2348                         rte_flow_error_set(error, EINVAL,
2349                                 RTE_FLOW_ERROR_TYPE_ITEM,
2350                                 item, "Not supported by fdir filter");
2351                         return -rte_errno;
2352                 }
2353         }
2354
2355         /* When no vlan, considered as full mask. */
2356         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2357
2358         /**
2359          * Check if the next not void item is vlan or ipv4.
2360          * IPv6 is not supported.
2361          */
2362         item = next_no_void_pattern(pattern, item);
2363         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2364                 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2365                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2366                 rte_flow_error_set(error, EINVAL,
2367                         RTE_FLOW_ERROR_TYPE_ITEM,
2368                         item, "Not supported by fdir filter");
2369                 return -rte_errno;
2370         }
2371         /*Not supported last point for range*/
2372         if (item->last) {
2373                 rte_flow_error_set(error, EINVAL,
2374                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2375                         item, "Not supported last point for range");
2376                 return -rte_errno;
2377         }
2378
2379         /**
2380          * If the tag is 0, it means we don't care about the VLAN.
2381          * Do nothing.
2382          */
2383
2384         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2385 }
2386
2387 static int
2388 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2389                         const struct rte_flow_attr *attr,
2390                         const struct rte_flow_item pattern[],
2391                         const struct rte_flow_action actions[],
2392                         struct txgbe_fdir_rule *rule,
2393                         struct rte_flow_error *error)
2394 {
2395         int ret;
2396         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2397         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2398
2399         ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2400                                         actions, rule, error);
2401         if (!ret)
2402                 goto step_next;
2403
2404         ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2405                                         actions, rule, error);
2406         if (ret)
2407                 return ret;
2408
2409 step_next:
2410
2411         if (hw->mac.type == txgbe_mac_raptor &&
2412                 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2413                 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2414                 return -ENOTSUP;
2415
2416         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2417             fdir_mode != rule->mode)
2418                 return -ENOTSUP;
2419
2420         if (rule->queue >= dev->data->nb_rx_queues)
2421                 return -ENOTSUP;
2422
2423         return ret;
2424 }
2425
2426 static int
2427 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2428                         const struct rte_flow_attr *attr,
2429                         const struct rte_flow_action actions[],
2430                         struct txgbe_rte_flow_rss_conf *rss_conf,
2431                         struct rte_flow_error *error)
2432 {
2433         const struct rte_flow_action *act;
2434         const struct rte_flow_action_rss *rss;
2435         uint16_t n;
2436
2437         /**
2438          * rss only supports forwarding,
2439          * check if the first not void action is RSS.
2440          */
2441         act = next_no_void_action(actions, NULL);
2442         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2443                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2444                 rte_flow_error_set(error, EINVAL,
2445                         RTE_FLOW_ERROR_TYPE_ACTION,
2446                         act, "Not supported action.");
2447                 return -rte_errno;
2448         }
2449
2450         rss = (const struct rte_flow_action_rss *)act->conf;
2451
2452         if (!rss || !rss->queue_num) {
2453                 rte_flow_error_set(error, EINVAL,
2454                                 RTE_FLOW_ERROR_TYPE_ACTION,
2455                                 act,
2456                            "no valid queues");
2457                 return -rte_errno;
2458         }
2459
2460         for (n = 0; n < rss->queue_num; n++) {
2461                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2462                         rte_flow_error_set(error, EINVAL,
2463                                    RTE_FLOW_ERROR_TYPE_ACTION,
2464                                    act,
2465                                    "queue id > max number of queues");
2466                         return -rte_errno;
2467                 }
2468         }
2469
2470         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2471                 return rte_flow_error_set
2472                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2473                          "non-default RSS hash functions are not supported");
2474         if (rss->level)
2475                 return rte_flow_error_set
2476                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2477                          "a nonzero RSS encapsulation level is not supported");
2478         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2479                 return rte_flow_error_set
2480                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2481                          "RSS hash key must be exactly 40 bytes");
2482         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2483                 return rte_flow_error_set
2484                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2485                          "too many queues for RSS context");
2486         if (txgbe_rss_conf_init(rss_conf, rss))
2487                 return rte_flow_error_set
2488                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2489                          "RSS context initialization failure");
2490
2491         /* check if the next not void item is END */
2492         act = next_no_void_action(actions, act);
2493         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2494                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2495                 rte_flow_error_set(error, EINVAL,
2496                         RTE_FLOW_ERROR_TYPE_ACTION,
2497                         act, "Not supported action.");
2498                 return -rte_errno;
2499         }
2500
2501         /* parse attr */
2502         /* must be input direction */
2503         if (!attr->ingress) {
2504                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2505                 rte_flow_error_set(error, EINVAL,
2506                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2507                                    attr, "Only support ingress.");
2508                 return -rte_errno;
2509         }
2510
2511         /* not supported */
2512         if (attr->egress) {
2513                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2514                 rte_flow_error_set(error, EINVAL,
2515                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2516                                    attr, "Not support egress.");
2517                 return -rte_errno;
2518         }
2519
2520         /* not supported */
2521         if (attr->transfer) {
2522                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2523                 rte_flow_error_set(error, EINVAL,
2524                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2525                                    attr, "No support for transfer.");
2526                 return -rte_errno;
2527         }
2528
2529         if (attr->priority > 0xFFFF) {
2530                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2531                 rte_flow_error_set(error, EINVAL,
2532                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2533                                    attr, "Error priority.");
2534                 return -rte_errno;
2535         }
2536
2537         return 0;
2538 }
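
/*
 * Illustrative sketch (not part of the driver): an RSS action that passes the
 * checks above could be built by an application roughly as follows, assuming
 * queues 0-3 exist on the port and the default hash function and 40-byte key
 * are acceptable:
 *
 *	uint16_t queues[4] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP,
 *		.key_len = 0,
 *		.queue_num = 4,
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * together with an ingress-only attr (ingress = 1, egress = 0, transfer = 0).
 */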
2539
2540 void
2541 txgbe_filterlist_init(void)
2542 {
2543         TAILQ_INIT(&filter_ntuple_list);
2544         TAILQ_INIT(&filter_ethertype_list);
2545         TAILQ_INIT(&filter_syn_list);
2546         TAILQ_INIT(&filter_fdir_list);
2547         TAILQ_INIT(&filter_l2_tunnel_list);
2548         TAILQ_INIT(&filter_rss_list);
2549         TAILQ_INIT(&txgbe_flow_list);
2550 }
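
/*
 * Note: the TAILQ lists initialized above keep a software copy of every
 * filter that gets programmed; each rte_flow handle points at its list
 * element (flow->rule), so txgbe_flow_destroy() below can later find and
 * remove the matching hardware entry.
 */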
2551
2552 /**
2553  * Create or destroy a flow rule.
2554  * Theoretically one rule can match more than one kind of filter.
2555  * We let it use the first filter type it hits,
2556  * so the order of the parsers below matters.
2557  */
2558 static struct rte_flow *
2559 txgbe_flow_create(struct rte_eth_dev *dev,
2560                   const struct rte_flow_attr *attr,
2561                   const struct rte_flow_item pattern[],
2562                   const struct rte_flow_action actions[],
2563                   struct rte_flow_error *error)
2564 {
2565         int ret;
2566         struct rte_eth_ntuple_filter ntuple_filter;
2567         struct rte_eth_ethertype_filter ethertype_filter;
2568         struct rte_eth_syn_filter syn_filter;
2569         struct txgbe_fdir_rule fdir_rule;
2570         struct txgbe_l2_tunnel_conf l2_tn_filter;
2571         struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2572         struct txgbe_rte_flow_rss_conf rss_conf;
2573         struct rte_flow *flow = NULL;
2574         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2575         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2576         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2577         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2578         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2579         struct txgbe_rss_conf_ele *rss_filter_ptr;
2580         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2581         uint8_t first_mask = FALSE;
2582
2583         flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2584         if (!flow) {
2585                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2586                 return (struct rte_flow *)flow;
2587         }
2588         txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2589                         sizeof(struct txgbe_flow_mem), 0);
2590         if (!txgbe_flow_mem_ptr) {
2591                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2592                 rte_free(flow);
2593                 return NULL;
2594         }
2595         txgbe_flow_mem_ptr->flow = flow;
2596         TAILQ_INSERT_TAIL(&txgbe_flow_list,
2597                                 txgbe_flow_mem_ptr, entries);
2598
2599         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2600         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2601                         actions, &ntuple_filter, error);
2602
2603         if (!ret) {
2604                 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2605                 if (!ret) {
2606                         ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2607                                 sizeof(struct txgbe_ntuple_filter_ele), 0);
2608                         if (!ntuple_filter_ptr) {
2609                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2610                                 goto out;
2611                         }
2612                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2613                                 &ntuple_filter,
2614                                 sizeof(struct rte_eth_ntuple_filter));
2615                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2616                                 ntuple_filter_ptr, entries);
2617                         flow->rule = ntuple_filter_ptr;
2618                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2619                         return flow;
2620                 }
2621                 goto out;
2622         }
2623
2624         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2625         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2626                                 actions, &ethertype_filter, error);
2627         if (!ret) {
2628                 ret = txgbe_add_del_ethertype_filter(dev,
2629                                 &ethertype_filter, TRUE);
2630                 if (!ret) {
2631                         ethertype_filter_ptr =
2632                                 rte_zmalloc("txgbe_ethertype_filter",
2633                                 sizeof(struct txgbe_ethertype_filter_ele), 0);
2634                         if (!ethertype_filter_ptr) {
2635                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2636                                 goto out;
2637                         }
2638                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2639                                 &ethertype_filter,
2640                                 sizeof(struct rte_eth_ethertype_filter));
2641                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2642                                 ethertype_filter_ptr, entries);
2643                         flow->rule = ethertype_filter_ptr;
2644                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2645                         return flow;
2646                 }
2647                 goto out;
2648         }
2649
2650         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2651         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2652                                 actions, &syn_filter, error);
2653         if (!ret) {
2654                 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2655                 if (!ret) {
2656                         syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2657                                 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2658                         if (!syn_filter_ptr) {
2659                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2660                                 goto out;
2661                         }
2662                         rte_memcpy(&syn_filter_ptr->filter_info,
2663                                 &syn_filter,
2664                                 sizeof(struct rte_eth_syn_filter));
2665                         TAILQ_INSERT_TAIL(&filter_syn_list,
2666                                 syn_filter_ptr,
2667                                 entries);
2668                         flow->rule = syn_filter_ptr;
2669                         flow->filter_type = RTE_ETH_FILTER_SYN;
2670                         return flow;
2671                 }
2672                 goto out;
2673         }
2674
2675         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2676         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2677                                 actions, &fdir_rule, error);
2678         if (!ret) {
2679                 /* A mask cannot be deleted. */
2680                 if (fdir_rule.b_mask) {
2681                         if (!fdir_info->mask_added) {
2682                                 /* It's the first time the mask is set. */
2683                                 rte_memcpy(&fdir_info->mask,
2684                                         &fdir_rule.mask,
2685                                         sizeof(struct txgbe_hw_fdir_mask));
2686                                 fdir_info->flex_bytes_offset =
2687                                         fdir_rule.flex_bytes_offset;
2688
2689                                 if (fdir_rule.mask.flex_bytes_mask)
2690                                         txgbe_fdir_set_flexbytes_offset(dev,
2691                                                 fdir_rule.flex_bytes_offset);
2692
2693                                 ret = txgbe_fdir_set_input_mask(dev);
2694                                 if (ret)
2695                                         goto out;
2696
2697                                 fdir_info->mask_added = TRUE;
2698                                 first_mask = TRUE;
2699                         } else {
2700                                 /**
2701                                  * Only one global mask is supported;
2702                                  * all the masks must be the same.
2703                                  */
2704                                 ret = memcmp(&fdir_info->mask,
2705                                         &fdir_rule.mask,
2706                                         sizeof(struct txgbe_hw_fdir_mask));
2707                                 if (ret)
2708                                         goto out;
2709
2710                                 if (fdir_info->flex_bytes_offset !=
2711                                                 fdir_rule.flex_bytes_offset)
2712                                         goto out;
2713                         }
2714                 }
2715
2716                 if (fdir_rule.b_spec) {
2717                         ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2718                                         FALSE, FALSE);
2719                         if (!ret) {
2720                                 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2721                                         sizeof(struct txgbe_fdir_rule_ele), 0);
2722                                 if (!fdir_rule_ptr) {
2723                                         PMD_DRV_LOG(ERR,
2724                                                 "failed to allocate memory");
2725                                         goto out;
2726                                 }
2727                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2728                                         &fdir_rule,
2729                                         sizeof(struct txgbe_fdir_rule));
2730                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2731                                         fdir_rule_ptr, entries);
2732                                 flow->rule = fdir_rule_ptr;
2733                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2734
2735                                 return flow;
2736                         }
2737
2738                         if (ret) {
2739                                 /**
2740                                  * clear the mask_added flag if programming
2741                                  * the filter fails
2742                                  **/
2743                                 if (first_mask)
2744                                         fdir_info->mask_added = FALSE;
2745                                 goto out;
2746                         }
2747                 }
2748
2749                 goto out;
2750         }
2751
2752         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2753         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2754                                         actions, &l2_tn_filter, error);
2755         if (!ret) {
2756                 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2757                 if (!ret) {
2758                         l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2759                                 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2760                         if (!l2_tn_filter_ptr) {
2761                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2762                                 goto out;
2763                         }
2764                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2765                                 &l2_tn_filter,
2766                                 sizeof(struct txgbe_l2_tunnel_conf));
2767                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2768                                 l2_tn_filter_ptr, entries);
2769                         flow->rule = l2_tn_filter_ptr;
2770                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2771                         return flow;
2772                 }
2773         }
2774
2775         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2776         ret = txgbe_parse_rss_filter(dev, attr,
2777                                         actions, &rss_conf, error);
2778         if (!ret) {
2779                 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2780                 if (!ret) {
2781                         rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2782                                 sizeof(struct txgbe_rss_conf_ele), 0);
2783                         if (!rss_filter_ptr) {
2784                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2785                                 goto out;
2786                         }
2787                         txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2788                                             &rss_conf.conf);
2789                         TAILQ_INSERT_TAIL(&filter_rss_list,
2790                                 rss_filter_ptr, entries);
2791                         flow->rule = rss_filter_ptr;
2792                         flow->filter_type = RTE_ETH_FILTER_HASH;
2793                         return flow;
2794                 }
2795         }
2796
2797 out:
2798         TAILQ_REMOVE(&txgbe_flow_list,
2799                 txgbe_flow_mem_ptr, entries);
2800         rte_flow_error_set(error, -ret,
2801                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2802                            "Failed to create flow.");
2803         rte_free(txgbe_flow_mem_ptr);
2804         rte_free(flow);
2805         return NULL;
2806 }
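
/*
 * Illustrative sketch (not part of the driver): txgbe_flow_create() above is
 * reached through the generic rte_flow API, e.g. (error handling omitted):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	rte_flow_destroy(port_id, f, &err);
 *
 * where attr, pattern and actions describe one of the filter types parsed
 * above (ntuple, ethertype, SYN, flow director, L2 tunnel or RSS).
 */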
2807
2808 /**
2809  * Check if the flow rule is supported by txgbe.
2810  * It only checks the format; it does not guarantee that the rule can be
2811  * programmed into the HW, because there may not be enough room for it.
2812  */
2813 static int
2814 txgbe_flow_validate(struct rte_eth_dev *dev,
2815                 const struct rte_flow_attr *attr,
2816                 const struct rte_flow_item pattern[],
2817                 const struct rte_flow_action actions[],
2818                 struct rte_flow_error *error)
2819 {
2820         struct rte_eth_ntuple_filter ntuple_filter;
2821         struct rte_eth_ethertype_filter ethertype_filter;
2822         struct rte_eth_syn_filter syn_filter;
2823         struct txgbe_l2_tunnel_conf l2_tn_filter;
2824         struct txgbe_fdir_rule fdir_rule;
2825         struct txgbe_rte_flow_rss_conf rss_conf;
2826         int ret = 0;
2827
2828         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2829         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2830                                 actions, &ntuple_filter, error);
2831         if (!ret)
2832                 return 0;
2833
2834         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2835         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2836                                 actions, &ethertype_filter, error);
2837         if (!ret)
2838                 return 0;
2839
2840         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2841         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2842                                 actions, &syn_filter, error);
2843         if (!ret)
2844                 return 0;
2845
2846         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2847         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2848                                 actions, &fdir_rule, error);
2849         if (!ret)
2850                 return 0;
2851
2852         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2853         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2854                                 actions, &l2_tn_filter, error);
2855         if (!ret)
2856                 return 0;
2857
2858         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2859         ret = txgbe_parse_rss_filter(dev, attr,
2860                                         actions, &rss_conf, error);
2861
2862         return ret;
2863 }
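
/*
 * Note: validation reuses the same parsers as txgbe_flow_create() but does
 * not touch the hardware or the software filter lists; a rule that validates
 * successfully can still fail at creation time if resources run out.
 */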
2864
2865 /* Destroy a flow rule on txgbe. */
2866 static int
2867 txgbe_flow_destroy(struct rte_eth_dev *dev,
2868                 struct rte_flow *flow,
2869                 struct rte_flow_error *error)
2870 {
2871         int ret = 0;
2872         struct rte_flow *pmd_flow = flow;
2873         enum rte_filter_type filter_type = pmd_flow->filter_type;
2874         struct rte_eth_ntuple_filter ntuple_filter;
2875         struct rte_eth_ethertype_filter ethertype_filter;
2876         struct rte_eth_syn_filter syn_filter;
2877         struct txgbe_fdir_rule fdir_rule;
2878         struct txgbe_l2_tunnel_conf l2_tn_filter;
2879         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2880         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2881         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2882         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2883         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2884         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2885         struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2886         struct txgbe_rss_conf_ele *rss_filter_ptr;
2887
2888         switch (filter_type) {
2889         case RTE_ETH_FILTER_NTUPLE:
2890                 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
2891                                         pmd_flow->rule;
2892                 rte_memcpy(&ntuple_filter,
2893                         &ntuple_filter_ptr->filter_info,
2894                         sizeof(struct rte_eth_ntuple_filter));
2895                 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2896                 if (!ret) {
2897                         TAILQ_REMOVE(&filter_ntuple_list,
2898                         ntuple_filter_ptr, entries);
2899                         rte_free(ntuple_filter_ptr);
2900                 }
2901                 break;
2902         case RTE_ETH_FILTER_ETHERTYPE:
2903                 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
2904                                         pmd_flow->rule;
2905                 rte_memcpy(&ethertype_filter,
2906                         &ethertype_filter_ptr->filter_info,
2907                         sizeof(struct rte_eth_ethertype_filter));
2908                 ret = txgbe_add_del_ethertype_filter(dev,
2909                                 &ethertype_filter, FALSE);
2910                 if (!ret) {
2911                         TAILQ_REMOVE(&filter_ethertype_list,
2912                                 ethertype_filter_ptr, entries);
2913                         rte_free(ethertype_filter_ptr);
2914                 }
2915                 break;
2916         case RTE_ETH_FILTER_SYN:
2917                 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
2918                                 pmd_flow->rule;
2919                 rte_memcpy(&syn_filter,
2920                         &syn_filter_ptr->filter_info,
2921                         sizeof(struct rte_eth_syn_filter));
2922                 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
2923                 if (!ret) {
2924                         TAILQ_REMOVE(&filter_syn_list,
2925                                 syn_filter_ptr, entries);
2926                         rte_free(syn_filter_ptr);
2927                 }
2928                 break;
2929         case RTE_ETH_FILTER_FDIR:
2930                 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
2931                 rte_memcpy(&fdir_rule,
2932                         &fdir_rule_ptr->filter_info,
2933                         sizeof(struct txgbe_fdir_rule));
2934                 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2935                 if (!ret) {
2936                         TAILQ_REMOVE(&filter_fdir_list,
2937                                 fdir_rule_ptr, entries);
2938                         rte_free(fdir_rule_ptr);
2939                         if (TAILQ_EMPTY(&filter_fdir_list))
2940                                 fdir_info->mask_added = false;
2941                 }
2942                 break;
2943         case RTE_ETH_FILTER_L2_TUNNEL:
2944                 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
2945                                 pmd_flow->rule;
2946                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2947                         sizeof(struct txgbe_l2_tunnel_conf));
2948                 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2949                 if (!ret) {
2950                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2951                                 l2_tn_filter_ptr, entries);
2952                         rte_free(l2_tn_filter_ptr);
2953                 }
2954                 break;
2955         case RTE_ETH_FILTER_HASH:
2956                 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
2957                                 pmd_flow->rule;
2958                 ret = txgbe_config_rss_filter(dev,
2959                                         &rss_filter_ptr->filter_info, FALSE);
2960                 if (!ret) {
2961                         TAILQ_REMOVE(&filter_rss_list,
2962                                 rss_filter_ptr, entries);
2963                         rte_free(rss_filter_ptr);
2964                 }
2965                 break;
2966         default:
2967                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2968                             filter_type);
2969                 ret = -EINVAL;
2970                 break;
2971         }
2972
2973         if (ret) {
2974                 rte_flow_error_set(error, EINVAL,
2975                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2976                                 NULL, "Failed to destroy flow");
2977                 return ret;
2978         }
2979
2980         TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
2981                 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
2982                         TAILQ_REMOVE(&txgbe_flow_list,
2983                                 txgbe_flow_mem_ptr, entries);
2984                         rte_free(txgbe_flow_mem_ptr);
2985                 }
2986         }
2987         rte_free(flow);
2988
2989         return ret;
2990 }
2991
2992 /* Destroy all flow rules associated with a port on txgbe. Not implemented yet. */
2993 static int
2994 txgbe_flow_flush(struct rte_eth_dev *dev,
2995                 struct rte_flow_error *error)
2996 {
2997         int ret = 0;
2998
2999         return ret;
3000 }
3001
3002 const struct rte_flow_ops txgbe_flow_ops = {
3003         .validate = txgbe_flow_validate,
3004         .create = txgbe_flow_create,
3005         .destroy = txgbe_flow_destroy,
3006         .flush = txgbe_flow_flush,
3007 };
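
/*
 * Note: these callbacks are handed to the ethdev layer (in this driver
 * presumably via the generic filter-control / flow-ops hook in
 * txgbe_ethdev.c), which is how rte_flow_validate/create/destroy/flush
 * calls from applications end up in the functions above.
 */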
3008